Add files using upload-large-folder tool
Browse files. This view is limited to 50 files because it contains too many changes.
See raw diff
- valley/lib/python3.10/site-packages/torch/_higher_order_ops/__init__.py +3 -0
- valley/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/__init__.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/associative_scan.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/auto_functionalize.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/cond.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/effects.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/flex_attention.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/map.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/out_dtype.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/strict_mode.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/torchbind.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/triton_kernel_wrap.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/utils.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/while_loop.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/wrap.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/torch/_higher_order_ops/associative_scan.py +216 -0
- valley/lib/python3.10/site-packages/torch/_higher_order_ops/auto_functionalize.py +262 -0
- valley/lib/python3.10/site-packages/torch/_higher_order_ops/cond.py +362 -0
- valley/lib/python3.10/site-packages/torch/_higher_order_ops/effects.py +225 -0
- valley/lib/python3.10/site-packages/torch/_higher_order_ops/flex_attention.py +681 -0
- valley/lib/python3.10/site-packages/torch/_higher_order_ops/map.py +351 -0
- valley/lib/python3.10/site-packages/torch/_higher_order_ops/out_dtype.py +171 -0
- valley/lib/python3.10/site-packages/torch/_higher_order_ops/strict_mode.py +92 -0
- valley/lib/python3.10/site-packages/torch/_higher_order_ops/torchbind.py +119 -0
- valley/lib/python3.10/site-packages/torch/_higher_order_ops/triton_kernel_wrap.py +737 -0
- valley/lib/python3.10/site-packages/torch/_higher_order_ops/utils.py +212 -0
- valley/lib/python3.10/site-packages/torch/_higher_order_ops/while_loop.py +270 -0
- valley/lib/python3.10/site-packages/torch/_higher_order_ops/wrap.py +184 -0
- valley/lib/python3.10/site-packages/torch/futures/__init__.py +319 -0
- valley/lib/python3.10/site-packages/torch/futures/__pycache__/__init__.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/__init__.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/_functions.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/comm.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/data_parallel.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/distributed.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/parallel_apply.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/replicate.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/scatter_gather.cpython-310.pyc +0 -0
- valley/lib/python3.10/site-packages/torch/nn/parallel/_functions.py +126 -0
- valley/lib/python3.10/site-packages/torch/nn/parallel/data_parallel.py +270 -0
- valley/lib/python3.10/site-packages/torch/nn/parallel/parallel_apply.py +111 -0
- valley/lib/python3.10/site-packages/torch/testing/__init__.py +4 -0
- valley/lib/python3.10/site-packages/torch/testing/_comparison.py +1574 -0
- valley/lib/python3.10/site-packages/torch/testing/_creation.py +268 -0
- valley/lib/python3.10/site-packages/torch/testing/_internal/__init__.py +0 -0
- valley/lib/python3.10/site-packages/torch/testing/_internal/autocast_test_lists.py +370 -0
- valley/lib/python3.10/site-packages/torch/testing/_internal/autograd_function_db.py +635 -0
- valley/lib/python3.10/site-packages/torch/testing/_internal/common_cuda.py +281 -0
- valley/lib/python3.10/site-packages/torch/testing/_internal/common_device_type.py +1587 -0
- valley/lib/python3.10/site-packages/torch/testing/_internal/common_dist_composable.py +111 -0
valley/lib/python3.10/site-packages/torch/_higher_order_ops/__init__.py
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .cond import cond
|
| 2 |
+
from .while_loop import while_loop
|
| 3 |
+
from .flex_attention import flex_attention, flex_attention_backward
|
valley/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (306 Bytes). View file
|
|
|
valley/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/associative_scan.cpython-310.pyc
ADDED
|
Binary file (7.52 kB). View file
|
|
|
valley/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/auto_functionalize.cpython-310.pyc
ADDED
|
Binary file (6.4 kB). View file
|
|
|
valley/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/cond.cpython-310.pyc
ADDED
|
Binary file (10.9 kB). View file
|
|
|
valley/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/effects.cpython-310.pyc
ADDED
|
Binary file (6.51 kB). View file
|
|
|
valley/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/flex_attention.cpython-310.pyc
ADDED
|
Binary file (18.7 kB). View file
|
|
|
valley/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/map.cpython-310.pyc
ADDED
|
Binary file (11 kB). View file
|
|
|
valley/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/out_dtype.cpython-310.pyc
ADDED
|
Binary file (5.59 kB). View file
|
|
|
valley/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/strict_mode.cpython-310.pyc
ADDED
|
Binary file (2.97 kB). View file
|
|
|
valley/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/torchbind.cpython-310.pyc
ADDED
|
Binary file (3.54 kB). View file
|
|
|
valley/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/triton_kernel_wrap.cpython-310.pyc
ADDED
|
Binary file (18.1 kB). View file
|
|
|
valley/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/utils.cpython-310.pyc
ADDED
|
Binary file (6.99 kB). View file
|
|
|
valley/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/while_loop.cpython-310.pyc
ADDED
|
Binary file (8.48 kB). View file
|
|
|
valley/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/wrap.cpython-310.pyc
ADDED
|
Binary file (7.59 kB). View file
|
|
|
valley/lib/python3.10/site-packages/torch/_higher_order_ops/associative_scan.py
ADDED
|
@@ -0,0 +1,216 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import functools
|
| 3 |
+
import itertools
|
| 4 |
+
from typing import Callable, List
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
|
| 8 |
+
import torch._prims_common as utils
|
| 9 |
+
import torch._subclasses.functional_tensor
|
| 10 |
+
|
| 11 |
+
import torch.utils._pytree as pytree
|
| 12 |
+
|
| 13 |
+
from torch._C import DispatchKey
|
| 14 |
+
from torch._C._functorch import _add_batch_dim, get_unwrapped, maybe_get_bdim
|
| 15 |
+
from torch._higher_order_ops.utils import (
|
| 16 |
+
_set_compilation_env,
|
| 17 |
+
autograd_not_implemented,
|
| 18 |
+
reenter_make_fx,
|
| 19 |
+
unique_graph_id,
|
| 20 |
+
)
|
| 21 |
+
|
| 22 |
+
from torch._ops import HigherOrderOperator
|
| 23 |
+
from torch._subclasses.fake_tensor import FakeTensorMode
|
| 24 |
+
from torch.fx.experimental.proxy_tensor import (
|
| 25 |
+
disable_proxy_modes_tracing,
|
| 26 |
+
ProxyTorchDispatchMode,
|
| 27 |
+
track_tensor_tree,
|
| 28 |
+
)
|
| 29 |
+
|
| 30 |
+
aten = torch._ops.ops.aten
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def wrap_combine_fn_flat(*args, combine_fn, spec, num_leaves):
|
| 34 |
+
assert len(args) == 2 * num_leaves
|
| 35 |
+
lhs = pytree.tree_unflatten(args[:num_leaves], spec)
|
| 36 |
+
rhs = pytree.tree_unflatten(args[num_leaves:], spec)
|
| 37 |
+
combined = combine_fn(lhs, rhs)
|
| 38 |
+
combined_leaves = pytree.tree_leaves(combined)
|
| 39 |
+
assert num_leaves == len(combined_leaves)
|
| 40 |
+
return combined_leaves
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def associative_scan(
    combine_fn: Callable[[pytree.PyTree, pytree.PyTree], pytree.PyTree],
    input: pytree.PyTree,
    dim: int,
) -> torch.Tensor:
    r"""
    Performs an inclusive scan with an associative pointwise combine function.

    .. warning::
        `torch.associative_scan` is a prototype feature in PyTorch. It currently
        does not support autograd and you may run into miscompiles.
        Read more about feature classification at:
        https://pytorch.org/blog/pytorch-feature-classification-changes/#prototype

    This operator requires runtime code generation and so requires support for
    ``torch.compile``. Further, only CUDA device codegen is supported at the moment.

    Args:
        combine_fn (Callable): A binary callable with type ``(Tensor, Tensor) -> Tensor``,
            or if input is a pytree ``(pytree, pytree) -> pytree``.
            This function must be pure, pointwise, and satisfy the associative property.
        input (torch.Tensor): The input tensor, or nested pytree of tensors.
            All inputs are expected to have the same shape.
        dim (int): the dimension to scan over


    Example::

        def add(x: torch.Tensor, y: torch.Tensor):
            return x + y

        cumsum = associative_scan(add, x, dim)

    """
    # BUG FIX: these messages were plain strings containing literal "{...}"
    # placeholders; they must be f-strings so failures report the bad value.
    assert callable(combine_fn), f"combine_fn must be a callable, but got {combine_fn}"
    assert isinstance(dim, int), f"dim must be an int, but got {type(dim)}"

    # Outside of a compile region, re-enter through torch.compile so the HOP
    # is captured with a full graph (eager execution is unsupported).
    if not torch._dynamo.is_compiling():
        with _set_compilation_env(), torch._dynamo.utils.disable_cache_limit():
            return torch.compile(associative_scan, fullgraph=True)(
                combine_fn, input, dim
            )

    leaves, spec = pytree.tree_flatten(input)

    assert len(leaves) >= 1, "expected at least 1 input leaf"
    assert all(
        isinstance(x, torch.Tensor) for x in leaves
    ), "input leaves must be a Tensor"
    shape = leaves[0].shape
    ndim = len(shape)
    # Normalize a possibly-negative dim against the leaves' rank.
    dim = utils.canonicalize_dim(ndim, dim)

    for x in leaves[1:]:
        assert x.shape == shape, "All input tensors must have the same shape"

    # Lower the user's pytree combine_fn to the flat-leaf convention the
    # underlying operator expects.
    combine_fn = functools.partial(
        wrap_combine_fn_flat, combine_fn=combine_fn, spec=spec, num_leaves=len(leaves)
    )

    result_flat = associative_scan_op(combine_fn, leaves, dim)

    return pytree.tree_unflatten(result_flat, spec)
+
|
| 108 |
+
# The HigherOrderOperator carrying the scan through dispatch, tracing, and
# functionalization; per-key implementations are registered below.
associative_scan_op = HigherOrderOperator("associative_scan")
|
| 110 |
+
|
| 111 |
+
def trace_associative_scan(
    proxy_mode, func_overload, combine_fn: Callable, input: List[torch.Tensor], dim: int
):
    """Trace ``associative_scan`` into ``proxy_mode``'s graph.

    Builds the combine_fn subgraph from scalar sample inputs, validates that it
    is shape/dtype preserving, registers it as a submodule, and records a
    ``call_function`` node for ``func_overload``. Returns clones of the inputs
    tracked against the new proxy (the scan is shape-preserving).
    """
    with disable_proxy_modes_tracing():
        # combine_fn is pointwise, so trace it with scalar (0-d) samples —
        # two per leaf, for the left and right operands.
        sample_inputs = [
            torch.full((), False, dtype=x.dtype, device=x.device)
            for x in itertools.chain(input, input)
        ]
        combine_graph = reenter_make_fx(combine_fn)(*sample_inputs)

    outputs = None
    for node in combine_graph.graph.nodes:
        if node.op == "output":
            assert outputs is None
            assert len(node.args) == 1
            outputs = node.args[0]

    assert outputs is not None
    assert len(outputs) == len(
        input
    ), f"expected combine_fn to return {len(input)} results but got {len(outputs)}"

    for i, o in zip(input, outputs):
        o_meta = o.meta["tensor_meta"]
        assert o_meta.dtype == i.dtype, (
            f"combine_fn output type mismatch, expected {i.dtype} "
            + f"but got {o_meta.dtype}"
        )
        # BUG FIX: this assert appeared twice verbatim; a single copy suffices.
        assert (
            o_meta.shape == ()
        ), f"combine_fn must return a scalar tensor but got shape {o_meta.shape}"

    _, combine_graph_name = unique_graph_id(proxy_mode, prefix="scan_combine_graph")

    proxy_mode.tracer.root.register_module(combine_graph_name, combine_graph)

    args = (combine_graph, input, dim)
    proxy_args = pytree.tree_map(proxy_mode.tracer.unwrap_proxy, args)
    out_proxy = proxy_mode.tracer.create_proxy(
        "call_function", func_overload, proxy_args, {}, name="associative_scan"
    )

    with disable_proxy_modes_tracing():
        out = [aten.clone(x) for x in input]

    return track_tensor_tree(out, out_proxy, constant=None, tracer=proxy_mode.tracer)
|
| 161 |
+
|
| 162 |
+
@associative_scan_op.py_impl(DispatchKey.CompositeExplicitAutograd)
def associative_scan_op_dense(combine_fn, input, dim):
    # Eager execution is unsupported: the op requires torch.compile codegen.
    raise NotImplementedError("associative_scan is not implemented for eager")


# No backward formula; raise a deferred error only if a backward is requested.
associative_scan_op.py_impl(DispatchKey.Autograd)(
    autograd_not_implemented(associative_scan_op, deferred_error=True)
)
|
| 171 |
+
|
| 172 |
+
@associative_scan_op.py_impl(ProxyTorchDispatchMode)
def associative_scan_proxy_mode(mode, combine_fn, input, dim):
    """Proxy-mode rule: trace the scan when tracing is on, else redispatch."""
    if mode.enable_tracing:
        return trace_associative_scan(mode, associative_scan_op, combine_fn, input, dim)
    else:
        # BUG FIX: previously called associative_scan_op(mode, associative_scan_op,
        # combine_fn, input, dim), prepending two extra arguments that do not
        # match the operator's (combine_fn, input, dim) signature.
        return associative_scan_op(combine_fn, input, dim)
+
|
| 179 |
+
|
| 180 |
+
@associative_scan_op.py_impl(FakeTensorMode)
def assoiciative_scan_fake_tensor_mode(mode, combine_fn, input, dim):
    # NOTE: the registered name keeps its historical misspelling
    # ("assoiciative") to avoid changing the module's public surface.
    # The scan is shape/dtype preserving, so fake propagation just clones.
    with mode:
        return [torch.clone(leaf) for leaf in input]
+
|
| 185 |
+
|
| 186 |
+
@associative_scan_op.py_functionalize_impl
def associative_scan_functionalize(ctx, combine_fn, input, dim):
    """Functionalization rule: unwrap the inputs, redispatch, rewrap the result."""
    inner_inputs = ctx.unwrap_tensors(input)
    with ctx.redispatch_to_next():
        inner_result = associative_scan_op(combine_fn, inner_inputs, dim)
        return ctx.wrap_tensors(inner_result)
+
|
| 193 |
+
|
| 194 |
+
@associative_scan_op.py_impl(torch._C._functorch.TransformType.Vmap)
def associative_scan_batch_rule(interpreter, input, dim, combine_fn):
    """Vmap batching rule: move every batch dim to the front (broadcasting
    unbatched leaves), scan over ``dim + 1``, then re-attach the batch dim."""
    input_bdims = [maybe_get_bdim(x) for x in input]

    # Any batched leaf determines the batch size.
    batch_size = None
    for inp, bdim in zip(input, input_bdims):
        if bdim is not None:
            batch_size = get_unwrapped(inp).shape[bdim]

    assert batch_size
    input_unwrapped = []
    for x, bdim in zip(input, input_bdims):
        unwrap = get_unwrapped(x)
        # BUG FIX: the broadcast branch must key on the per-tensor batch dim
        # (`bdim`), not on the scan dimension `dim` (an int, never None).
        if bdim is None:
            unwrap = unwrap.unsqueeze(0).expand(batch_size, *x.shape)
        else:
            unwrap = unwrap.movedim(bdim, 0)
        input_unwrapped.append(unwrap)

    # Batch dim now occupies position 0, so the scan dim shifts by one.
    res = associative_scan_op(combine_fn, input_unwrapped, dim + 1)
    lvl = interpreter.level()
    return [_add_batch_dim(x, 0, lvl) for x in res]
|
valley/lib/python3.10/site-packages/torch/_higher_order_ops/auto_functionalize.py
ADDED
|
@@ -0,0 +1,262 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
from typing import Any, Dict, List, Optional, Tuple, Union
|
| 3 |
+
|
| 4 |
+
import torch
|
| 5 |
+
import torch.utils._pytree as pytree
|
| 6 |
+
from torch import Tensor
|
| 7 |
+
from torch._C import DispatchKey
|
| 8 |
+
from torch._ops import HigherOrderOperator
|
| 9 |
+
from torch._prims_common import clone_preserve_strides
|
| 10 |
+
from torch._subclasses.fake_tensor import FakeTensorMode
|
| 11 |
+
from torch.fx.experimental.proxy_tensor import (
|
| 12 |
+
disable_proxy_modes_tracing,
|
| 13 |
+
ProxyTorchDispatchMode,
|
| 14 |
+
track_tensor_tree,
|
| 15 |
+
)
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
# NOTE: [auto-functionalizing custom ops]
|
| 19 |
+
# Users may wish to torch.compile custom ops that mutate their inputs.
|
| 20 |
+
# torch.compile will automatically support this op without anyone needing
|
| 21 |
+
# to provide a functionalization kernel for it. Here's how.
|
| 22 |
+
#
|
| 23 |
+
# Let's say we have a hypothetical mylib::sin_(Tensor(a!) x) -> ()
|
| 24 |
+
# op. First, when FakeTensor sees this op:
|
| 25 |
+
# - If the schema says it returns nothing, we can generate a trivial
|
| 26 |
+
# FakeTensor rule for it (that returns nothing).
|
| 27 |
+
# - Otherwise, the user needs to provide a FakeTensor impl (fake impl)
|
| 28 |
+
#
|
| 29 |
+
# Next, when Python FunctionalTensor sees the op, it will functionalize
|
| 30 |
+
# it by emitting a call to an auto_functionalize(op, ["x"], {"x": ...})
|
| 31 |
+
# HOP and replacing the mutated inputs with corresponding outputs of this HOP.
|
| 32 |
+
# This HOP effectively runs the functional version of the op when
|
| 33 |
+
# called: it clones inputs that will be mutated, runs the op, and
|
| 34 |
+
# then returns (output, Tensors with the new values)
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
class AutoFunctionalized(HigherOrderOperator):
    """auto_functionalized(_mutable_op, **kwargs)

    This HOP runs a "functional" version of _mutable_op.

    Concretely, it looks at all the arguments that are mutable through
    _mutable_op's operator schema, clones those kwargs, runs
    `out = _mutable_op(**kwargs)` with the cloned values, and then returns the
    operator output concatenated with the cloned values that were mutated.

    We have some restrictions on `_mutable_op`.
    See `can_auto_functionalize` for the restrictions. We can likely lift
    many of these if users request it.

    The reason why _mutable_op is prefixed with an
    underscore is to prevent collisions with kwarg names in **kwargs.
    """

    def __init__(self):
        super().__init__("auto_functionalized")

    def __call__(
        self,
        _mutable_op: torch._ops.OpOverload,
        **kwargs: Dict[str, Any],
    ) -> Tuple[Any, Tuple[Tensor, ...]]:
        # Only ops that satisfy the restrictions documented above may be
        # auto-functionalized; reject anything else up front.
        assert can_auto_functionalize(_mutable_op)
        assert isinstance(kwargs, dict)
        return super().__call__(_mutable_op, **kwargs)


# Module-level singleton through which all dispatch-key impls below register.
auto_functionalized = AutoFunctionalized()
+
|
| 70 |
+
|
| 71 |
+
def can_auto_functionalize(op: torch._ops.OperatorBase) -> bool:
|
| 72 |
+
if not isinstance(op, torch._ops.OpOverload):
|
| 73 |
+
return False
|
| 74 |
+
|
| 75 |
+
if torch._library.utils.is_builtin(op):
|
| 76 |
+
# We control the built-ins. These may (in rare cases)
|
| 77 |
+
# do input metadata mutation (which we have banned on custom ops)
|
| 78 |
+
return False
|
| 79 |
+
schema = op._schema
|
| 80 |
+
if not schema.is_mutable:
|
| 81 |
+
return False
|
| 82 |
+
schema = op._schema
|
| 83 |
+
|
| 84 |
+
for arg in schema.arguments:
|
| 85 |
+
if arg.alias_info is None:
|
| 86 |
+
continue
|
| 87 |
+
if not arg.alias_info.is_write:
|
| 88 |
+
continue
|
| 89 |
+
if type(arg.type) is torch.TensorType:
|
| 90 |
+
continue
|
| 91 |
+
if (
|
| 92 |
+
type(arg.type) is torch.OptionalType
|
| 93 |
+
and type(arg.type.getElementType()) is torch.TensorType
|
| 94 |
+
):
|
| 95 |
+
continue
|
| 96 |
+
# Not yet supported: other Tensor types. This includes things like
|
| 97 |
+
# Tensor[], Tensor?[], Tensor[]?.
|
| 98 |
+
return False
|
| 99 |
+
|
| 100 |
+
# The returns must not alias anything
|
| 101 |
+
for ret in schema.returns:
|
| 102 |
+
if ret.alias_info is None and type(ret.type) is torch.TensorType:
|
| 103 |
+
continue
|
| 104 |
+
# Not yet supported: List[Tensor] return.
|
| 105 |
+
return False
|
| 106 |
+
return True
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
@auto_functionalized.py_impl(DispatchKey.CompositeExplicitAutograd)
def auto_functionalized_dense(
    _mutable_op: torch._ops.OpOverload,
    _only_clone_these_tensors: Optional[Tuple[str, ...]] = None,
    **kwargs: Dict[str, Any],
) -> Tuple[Any, Tuple[Tensor, ...]]:
    """Dense kernel: clone the schema-mutated kwargs (optionally restricted to
    ``_only_clone_these_tensors``), run the op on the clones, and return the
    op's outputs followed by the (possibly cloned) mutated values."""
    new_kwargs = dict(**kwargs)
    mutated_values = []

    for name in get_mutable_arg_names(_mutable_op):
        value = kwargs[name]
        wants_clone = (
            _only_clone_these_tensors is None or name in _only_clone_these_tensors
        )
        # A mutable arg may be None when its schema type is `Tensor(a!)?`.
        if wants_clone and value is not None:
            value = clone_preserve_strides(value)
        new_kwargs[name] = value
        mutated_values.append(value)

    out = _mutable_op(**new_kwargs)

    # Flatten tuple-returning ops so the result is always a single flat tuple:
    # (*op_outputs, *mutated_values).
    if isinstance(out, tuple):
        return (*out, *mutated_values)  # type: ignore[return-value]
    return (out, *mutated_values)  # type: ignore[return-value]
+
|
| 139 |
+
|
| 140 |
+
@auto_functionalized.py_impl(FakeTensorMode)
def auto_functionalized_fake(
    mode,
    _mutable_op: torch._ops.OpOverload,
    **kwargs: Dict[str, Any],
) -> Tuple[Any, Tuple[Tensor, ...]]:
    """Fake-tensor rule: the dense implementation is meta-safe, so just run it
    under the fake mode."""
    with mode:
        return auto_functionalized_dense(_mutable_op, **kwargs)
+
|
| 150 |
+
|
| 151 |
+
@auto_functionalized.py_impl(ProxyTorchDispatchMode)
def auto_functionalized_proxy(
    mode,
    _mutable_op: torch._ops.OpOverload,
    **kwargs: Dict[str, Any],
) -> Tuple[Any, Tuple[Tensor, ...]]:
    """Tracing rule: compute real outputs with proxying disabled, then record
    a ``call_function`` node for the HOP and track the outputs against it."""
    if not mode.enable_tracing:
        return auto_functionalized(_mutable_op, **kwargs)

    with disable_proxy_modes_tracing():
        out = auto_functionalized(_mutable_op, **kwargs)

    proxy_kwargs = pytree.tree_map(mode.tracer.unwrap_proxy, kwargs)
    node = mode.tracer.create_proxy(
        "call_function",
        auto_functionalized,
        (_mutable_op,),
        proxy_kwargs,
    )
    return track_tensor_tree(out, node, constant=None, tracer=mode.tracer)


# Autograd is a no-op for this HOP: fall through to the backend kernels.
auto_functionalized.fallthrough(DispatchKey.AutogradCPU)
auto_functionalized.fallthrough(DispatchKey.AutogradCUDA)
+
|
| 177 |
+
|
| 178 |
+
def get_mutable_arg_names(op: torch._ops.OpOverload) -> List[str]:
    """Return the names of `op`'s arguments whose schema alias info marks
    them as written to (i.e. the arguments the op mutates)."""
    return [
        argument.name
        for argument in op._schema.arguments
        if argument.alias_info is not None and argument.alias_info.is_write
    ]
+
|
| 190 |
+
|
| 191 |
+
def do_auto_functionalize(
    op: torch._ops.OpOverload, args: Tuple[Any, ...], kwargs: Dict[str, Any]
) -> Any:
    """Functionalizes a call to op(*args, **kwargs) by emitting a call to
    `outs = auto_functionalized(op, normalized_kwargs)`
    and replacing the mutated (args, kwargs) with the corresponding outputs.

    The normalized_kwargs are just the (args, kwargs), but all in kwarg form.
    This makes handling easier for the auto_functionalized HOP.
    """
    # Imported locally to avoid an import cycle with the functional-tensor module.
    from torch._subclasses.functional_tensor import PythonFunctionalizeAPI

    ctx = PythonFunctionalizeAPI()

    # All of the (args, kwargs), but all as kwargs. The names for the
    # args come from the schema. This makes it easier for us to work with them.
    normalized_kwargs = {}
    schema = op._schema
    for idx, arg in enumerate(schema.arguments):
        # NB: torch_dispatch kwargs are the args defined as kwarg-only in the schema
        if arg.name in kwargs:
            normalized_kwargs[arg.name] = kwargs[arg.name]
        elif idx < len(args):
            # if it's out of bounds we don't need to do anything
            # as it means the optional arg was passed with its default
            # value
            normalized_kwargs[arg.name] = args[idx]
        else:
            normalized_kwargs[arg.name] = arg.default_value

    unwrapped_kwargs = ctx.unwrap_tensors(normalized_kwargs)  # type: ignore[arg-type]
    with ctx.redispatch_to_next():
        unwrapped_outs = auto_functionalized(
            op, **unwrapped_kwargs  # type: ignore[arg-type]
        )

    # List of the name of args that get mutated (according to the schema)
    mutable_args_names = get_mutable_arg_names(op)

    # The HOP returns a flat tuple: (*op_outputs, *mutated_arg_values).
    # Split it back into the two halves.
    unwrapped_actual_out: Union[Any, Tuple[Any]] = unwrapped_outs[
        : -len(mutable_args_names)
    ]
    unwrapped_mutable_out = unwrapped_outs[-len(mutable_args_names) :]

    # Reshape the op-output half to match the schema's declared return arity.
    if len(op._schema.returns) == 0:
        assert unwrapped_actual_out[0] is None
        unwrapped_actual_out = None
    elif len(op._schema.returns) == 1:
        assert len(unwrapped_actual_out) == 1
        unwrapped_actual_out = unwrapped_actual_out[0]
    else:
        assert len(unwrapped_actual_out) == len(op._schema.returns)

    # Propagate the functional results back onto the caller's original
    # (wrapped) tensors: replace the value, commit it, then sync.
    for name, unwrapped_out in zip(mutable_args_names, unwrapped_mutable_out):
        # Can be None if input was `Tensor(a!)?`
        if unwrapped_out is None:
            continue
        assert isinstance(unwrapped_out, torch.Tensor)
        orig_arg = normalized_kwargs[name]
        ctx.replace(orig_arg, unwrapped_out)
        ctx.commit_update(orig_arg)
        ctx.sync(orig_arg)

    return ctx.wrap_tensors(unwrapped_actual_out)  # type: ignore[arg-type]
+
|
| 256 |
+
|
| 257 |
+
@auto_functionalized.py_functionalize_impl
def auto_functionalized_func(ctx, _mutable_op, **kwargs):
    """Functionalization rule: unwrap kwargs, redispatch, rewrap the outputs."""
    inner_kwargs = ctx.unwrap_tensors(kwargs)
    with ctx.redispatch_to_next():
        inner_result = auto_functionalized(_mutable_op, **inner_kwargs)
        return ctx.wrap_tensors(inner_result)
|
valley/lib/python3.10/site-packages/torch/_higher_order_ops/cond.py
ADDED
|
@@ -0,0 +1,362 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import contextlib
|
| 3 |
+
|
| 4 |
+
import torch
|
| 5 |
+
import torch._subclasses.functional_tensor
|
| 6 |
+
|
| 7 |
+
import torch.utils._pytree as pytree
|
| 8 |
+
|
| 9 |
+
from torch._C import DispatchKey
|
| 10 |
+
from torch._C._functorch import (
|
| 11 |
+
_add_batch_dim,
|
| 12 |
+
get_unwrapped,
|
| 13 |
+
is_batchedtensor,
|
| 14 |
+
maybe_get_bdim,
|
| 15 |
+
)
|
| 16 |
+
from torch._functorch.utils import exposed_in
|
| 17 |
+
from torch._guards import detect_fake_mode
|
| 18 |
+
|
| 19 |
+
from torch._higher_order_ops.utils import (
|
| 20 |
+
_has_potential_branch_input_alias,
|
| 21 |
+
_has_potential_branch_input_mutation,
|
| 22 |
+
_set_compilation_env,
|
| 23 |
+
autograd_not_implemented,
|
| 24 |
+
reenter_make_fx,
|
| 25 |
+
unique_graph_id,
|
| 26 |
+
UnsupportedAliasMutationException,
|
| 27 |
+
)
|
| 28 |
+
|
| 29 |
+
from torch._ops import HigherOrderOperator
|
| 30 |
+
from torch._subclasses.fake_tensor import FakeTensorMode
|
| 31 |
+
from torch.fx.experimental.proxy_tensor import (
|
| 32 |
+
_temp_remove_pre_dispatch_torch_function_mode,
|
| 33 |
+
ProxyTorchDispatchMode,
|
| 34 |
+
track_tensor_tree,
|
| 35 |
+
)
|
| 36 |
+
from torch.fx.passes.shape_prop import _extract_tensor_metadata
|
| 37 |
+
from torch.utils._python_dispatch import _get_current_dispatch_mode
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
@exposed_in("torch")
|
| 41 |
+
def cond(pred, true_fn, false_fn, operands):
|
| 42 |
+
r"""
|
| 43 |
+
Conditionally applies `true_fn` or `false_fn`.
|
| 44 |
+
|
| 45 |
+
.. warning::
|
| 46 |
+
`torch.cond` is a prototype feature in PyTorch. It has limited support for input and output types and
|
| 47 |
+
doesn't support training currently. Please look forward to a more stable implementation in a future version of PyTorch.
|
| 48 |
+
Read more about feature classification at: https://pytorch.org/blog/pytorch-feature-classification-changes/#prototype
|
| 49 |
+
|
| 50 |
+
`cond` is structured control flow operator. That is, it is like a Python if-statement,
|
| 51 |
+
but has restrictions on `true_fn`, `false_fn`, and `operands` that enable it to be
|
| 52 |
+
capturable using torch.compile and torch.export.
|
| 53 |
+
|
| 54 |
+
Assuming the constraints on `cond`'s arguments are met, `cond` is equivalent to the following::
|
| 55 |
+
|
| 56 |
+
def cond(pred, true_branch, false_branch, operands):
|
| 57 |
+
if pred:
|
| 58 |
+
return true_branch(*operands)
|
| 59 |
+
else:
|
| 60 |
+
return false_branch(*operands)
|
| 61 |
+
|
| 62 |
+
Args:
|
| 63 |
+
pred (Union[bool, torch.Tensor]): A boolean expression or a tensor with one element,
|
| 64 |
+
indicating which branch function to apply.
|
| 65 |
+
|
| 66 |
+
true_fn (Callable): A callable function (a -> b) that is within the
|
| 67 |
+
scope that is being traced.
|
| 68 |
+
|
| 69 |
+
false_fn (Callable): A callable function (a -> b) that is within the
|
| 70 |
+
scope that is being traced. The true branch and false branch must
|
| 71 |
+
have consistent input and outputs, meaning the inputs have to be
|
| 72 |
+
the same, and the outputs have to be the same type and shape.
|
| 73 |
+
|
| 74 |
+
operands (Tuple of possibly nested dict/list/tuple of torch.Tensor): A tuple of inputs to the true/false functions.
|
| 75 |
+
|
| 76 |
+
Example::
|
| 77 |
+
|
| 78 |
+
def true_fn(x: torch.Tensor):
|
| 79 |
+
return x.cos()
|
| 80 |
+
def false_fn(x: torch.Tensor):
|
| 81 |
+
return x.sin()
|
| 82 |
+
return cond(x.shape[0] > 4, true_fn, false_fn, (x,))
|
| 83 |
+
|
| 84 |
+
Restrictions:
|
| 85 |
+
- The conditional statement (aka `pred`) must meet one of the following constraints:
|
| 86 |
+
|
| 87 |
+
- It's a `torch.Tensor` with only one element, and torch.bool dtype
|
| 88 |
+
|
| 89 |
+
- It's a boolean expression, e.g. `x.shape[0] > 10` or `x.dim() > 1 and x.shape[1] > 10`
|
| 90 |
+
|
| 91 |
+
- The branch function (aka `true_fn`/`false_fn`) must meet all of the following constraints:
|
| 92 |
+
|
| 93 |
+
- The function signature must match with operands.
|
| 94 |
+
|
| 95 |
+
- The function must return a tensor with the same metadata, e.g. shape,
|
| 96 |
+
dtype, etc.
|
| 97 |
+
|
| 98 |
+
- The function cannot have in-place mutations on inputs or global variables.
|
| 99 |
+
(Note: in-place tensor operations such as `add_` for intermediate results
|
| 100 |
+
are allowed in a branch)
|
| 101 |
+
|
| 102 |
+
.. warning::
|
| 103 |
+
Temporal Limitations:
|
| 104 |
+
|
| 105 |
+
- `cond` only supports **inference** right now. Autograd will be supported in the future.
|
| 106 |
+
|
| 107 |
+
- The **output** of branches must be a **single Tensor**. Pytree of tensors will be supported in the future.
|
| 108 |
+
|
| 109 |
+
"""
|
| 110 |
+
|
| 111 |
+
if torch.compiler.is_dynamo_compiling():
|
| 112 |
+
return cond_op(pred, true_fn, false_fn, operands)
|
| 113 |
+
|
| 114 |
+
def _validate_input(pred, true_fn, false_fn, operands):
|
| 115 |
+
if not isinstance(pred, (bool, torch.Tensor, torch.SymBool)):
|
| 116 |
+
raise RuntimeError(f"Expected pred to be bool or tensor, but got {pred}.")
|
| 117 |
+
|
| 118 |
+
if isinstance(pred, torch.Tensor) and pred.numel() != 1:
|
| 119 |
+
raise RuntimeError(
|
| 120 |
+
f"Expected pred to be bool or single-element tensor, but got {pred}."
|
| 121 |
+
)
|
| 122 |
+
|
| 123 |
+
if not callable(true_fn) or not callable(false_fn):
|
| 124 |
+
raise RuntimeError("Expect both branches to be callbale.")
|
| 125 |
+
|
| 126 |
+
if not isinstance(operands, (tuple, list)) or pytree.tree_any(
|
| 127 |
+
lambda t: not isinstance(t, torch.Tensor), operands
|
| 128 |
+
):
|
| 129 |
+
raise RuntimeError(
|
| 130 |
+
"Expect operands to be a tuple of possibly nested dict/list/tuple that only"
|
| 131 |
+
f"consists of tensor leaves, but got {operands}."
|
| 132 |
+
)
|
| 133 |
+
|
| 134 |
+
_validate_input(pred, true_fn, false_fn, operands)
|
| 135 |
+
|
| 136 |
+
if not torch._dynamo.is_dynamo_supported():
|
| 137 |
+
raise RuntimeError("torch.cond requires dynamo support.")
|
| 138 |
+
|
| 139 |
+
with _set_compilation_env():
|
| 140 |
+
with torch._dynamo.utils.disable_cache_limit():
|
| 141 |
+
with _temp_remove_pre_dispatch_torch_function_mode():
|
| 142 |
+
return torch.compile(cond_op, backend="eager", fullgraph=True)(
|
| 143 |
+
pred, true_fn, false_fn, operands
|
| 144 |
+
)
|
| 145 |
+
|
| 146 |
+
|
| 147 |
+
"""
|
| 148 |
+
We're going to define a `cond_op` operation.
|
| 149 |
+
In order to do this, we need implementations for each of the dispatch keys.
|
| 150 |
+
"""
|
| 151 |
+
cond_op = HigherOrderOperator("cond")
|
| 152 |
+
|
| 153 |
+
|
| 154 |
+
def trace_cond(proxy_mode, func_overload, pred, true_fn, false_fn, operands):
    """Trace `cond` into the proxy graph.

    Both branches are traced into standalone FX graphs, validated to produce
    the same number of outputs with matching tensor metadata, registered as
    submodules on the tracer root, and a single `conditional` call_function
    node is emitted.  Returns the (traced) branch output with its proxies
    attached via ``track_tensor_tree``.
    """
    assert isinstance(
        operands, (list, tuple)
    ), "Cond operands must be a list or tuple of tensors"
    assert all(
        isinstance(o, torch.Tensor) for o in operands
    ), "Cond operands must be a list of tensors"

    # Trace each branch into its own FX graph under a fresh make_fx.
    true_graph = reenter_make_fx(true_fn)(*operands)
    false_graph = reenter_make_fx(false_fn)(*operands)

    # Collect the output nodes' args of each traced graph so the two
    # branches can be compared structurally.
    true_outs = []
    false_outs = []
    for node in true_graph.graph.nodes:
        if node.op == "output":
            true_outs.extend(node.args)

    for node in false_graph.graph.nodes:
        if node.op == "output":
            false_outs.extend(node.args)

    flat_true_outs = pytree.arg_tree_leaves(*true_outs)
    flat_false_outs = pytree.arg_tree_leaves(*false_outs)
    if len(flat_true_outs) != len(flat_false_outs):
        raise torch._dynamo.exc.CondOpArgsMismatchError(
            f"Expected to return same number of outputs but got:"
            f"\n {true_fn.__name__} returns {len(flat_true_outs)} item(s)"
            f"\n {false_fn.__name__} returns {len(flat_false_outs)} item(s)"
        )

    # Both branches must produce tensors with identical metadata (shape,
    # dtype, ...) position by position.
    for i in range(0, len(flat_true_outs)):
        true_out = flat_true_outs[i]
        false_out = flat_false_outs[i]
        if true_out.meta["tensor_meta"] != false_out.meta["tensor_meta"]:
            raise torch._dynamo.exc.CondOpArgsMismatchError(
                f"Expected each tensor to have same metadata but got:"
                f"\n {true_fn.__name__} returns {true_out.meta['tensor_meta']}"
                f"\n {false_fn.__name__} returns {false_out.meta['tensor_meta']}"
            )

    # Register the branch graphs under unique names on the tracer root so
    # the emitted node can reference them.
    i, true_name = unique_graph_id(proxy_mode, prefix="true_graph")

    false_name = f"false_graph_{i}"
    assert not hasattr(proxy_mode.tracer.root, false_name)

    proxy_mode.tracer.root.register_module(true_name, true_graph)
    proxy_mode.tracer.root.register_module(false_name, false_graph)

    args = (pred, true_graph, false_graph, operands)

    proxy_args = pytree.tree_map(proxy_mode.tracer.unwrap_proxy, args)

    out_proxy = proxy_mode.tracer.create_proxy(
        "call_function", func_overload, proxy_args, {}, name="conditional"
    )

    # At this point, we're *guaranteed* that whether an output came from the
    # true or false branch is indistinguishable. So, as this is just for tracing
    # purposes, choose the true branch.

    # TODO: the unbacked symbol allocations MUST NOT leak out, if you want to
    # support this we need to arrange for the reenter_make_fx unbacked SymInts
    # to be used, AND we need to arrange for some sort of unification between
    # the two branches (but not really unification; e.g., if one branch
    # returns [u0] and the other returns [5] this is OK but you MUST NOT
    # conclude the result is 5. Also if one branch returns [3] and another
    # branch returns [5] you can make it work by immediately allocating a new
    # unbacked SymInt here).
    ignore_fresh_unbacked = contextlib.nullcontext()
    if (fake_mode := detect_fake_mode()) and fake_mode.shape_env:
        ignore_fresh_unbacked = fake_mode.shape_env.ignore_fresh_unbacked_symbols()

    # TODO: Uhh.... it shouldn't matter, but changing this to true_fn results in
    # a FakeTensorMode error :
    # `Current active mode <class 'torch._subclasses.fake_tensor.FakeTensorMode'> not registered`
    # TODO Sometimes the operands are not completely FakeTensor, something seems went wrong in
    # dynamo? Because of that it runs real computation sometimes and re-triggering downstream dispatch keys.
    with ignore_fresh_unbacked:
        out = false_fn(*operands)

    return track_tensor_tree(out, out_proxy, constant=None, tracer=proxy_mode.tracer)
|
| 235 |
+
|
| 236 |
+
|
| 237 |
+
@cond_op.py_impl(DispatchKey.CompositeExplicitAutograd)
def cond_op_dense(pred, true_fn, false_fn, operands):
    """Dense (eager) implementation of `cond`: run exactly one branch."""
    current_mode = _get_current_dispatch_mode()
    assert current_mode is None, "Mode should never be enabled for CPU/CUDA key"
    # By this point `pred` is a concrete value, so the choice is a plain
    # Python conditional.
    chosen_branch = true_fn if pred else false_fn
    return chosen_branch(*operands)
|
| 245 |
+
|
| 246 |
+
|
| 247 |
+
# Autograd is not implemented for `cond`; register a stub that defers the
# "not implemented" error to runtime (deferred_error=True) rather than
# failing at registration/dispatch time.
cond_op.py_impl(DispatchKey.Autograd)(
    autograd_not_implemented(cond_op, deferred_error=True)
)
|
| 250 |
+
|
| 251 |
+
|
| 252 |
+
@cond_op.py_impl(ProxyTorchDispatchMode)
def inner(mode, pred, true_fn, false_fn, operands):
    """Proxy-mode implementation: trace `cond` into the graph when tracing is enabled."""
    # Guard clause: with tracing disabled, just redispatch the op untouched.
    if not mode.enable_tracing:
        return cond_op(pred, true_fn, false_fn, operands)
    return trace_cond(mode, cond_op, pred, true_fn, false_fn, operands)
|
| 258 |
+
|
| 259 |
+
|
| 260 |
+
@cond_op.py_impl(FakeTensorMode)
def cond_fake_tensor_mode(mode, pred, true_fn, false_fn, operands):
    """FakeTensorMode implementation: run both branches on fake tensors,
    check that their outputs agree in count and metadata, and return the
    true branch's outputs."""
    # Ignore here, because if you've gotten here but you're not manually
    # tracing the inner graphs, that means that you intend to reuse the graph
    # directly. Which means the old unbacked symbol bindings are appropriate.
    # This strategy will not work if unbacked symbols can escape.
    ignore_fresh_unbacked = contextlib.nullcontext()
    if mode.shape_env:
        ignore_fresh_unbacked = mode.shape_env.ignore_fresh_unbacked_symbols()

    with mode, ignore_fresh_unbacked:
        true_outs = true_fn(*operands)
        flat_true_outs = pytree.tree_leaves(true_outs)
        flat_false_outs = pytree.tree_leaves(false_fn(*operands))
    if len(flat_true_outs) != len(flat_false_outs):
        raise RuntimeError("Unmatched number of outputs from cond() branches.")

    # Pairwise metadata check: both branches must produce tensors with
    # identical shape/dtype/etc. for each output position.
    for true_out, false_out in zip(flat_true_outs, flat_false_outs):
        true_meta = _extract_tensor_metadata(true_out)
        false_meta = _extract_tensor_metadata(false_out)
        if true_meta != false_meta:
            raise torch._dynamo.exc.CondOpArgsMismatchError(
                f"Expected each tensor to have same metadata but got:"
                f"\n {true_fn.__name__} returns {true_meta}"
                f"\n {false_fn.__name__} returns {false_meta}"
            )
    return true_outs
|
| 287 |
+
|
| 288 |
+
|
| 289 |
+
@cond_op.py_functionalize_impl
def cond_func(ctx, pred, true_fn, false_fn, inputs):
    """Functionalization implementation: reject branches that mutate or alias
    their inputs, then redispatch `cond` on unwrapped tensors with
    functionalized branches and re-wrap the result."""
    unwrapped_inputs = ctx.unwrap_tensors(inputs)
    unwrapped_pred = ctx.unwrap_tensors(pred)
    with ctx.redispatch_to_next() as m:
        functional_true = ctx.functionalize(true_fn)
        functional_false = ctx.functionalize(false_fn)
        pre_dispatch = hasattr(ctx, "mode") and ctx.mode.pre_dispatch
        # Input mutation is checked on the functionalized branches...
        for branch in [functional_true, functional_false]:
            if _has_potential_branch_input_mutation(
                branch, unwrapped_inputs, pre_dispatch=pre_dispatch
            ):
                raise UnsupportedAliasMutationException(
                    "One of torch.cond branch might be modifying the input!"
                )
        # ...while input aliasing is checked on the original branches.
        for branch in [true_fn, false_fn]:
            if _has_potential_branch_input_alias(
                branch, unwrapped_inputs, pre_dispatch=pre_dispatch
            ):
                raise UnsupportedAliasMutationException(
                    "One of torch.cond branch might be aliasing the input!"
                )

        cond_return = cond_op(
            unwrapped_pred, functional_true, functional_false, unwrapped_inputs
        )
        return ctx.wrap_tensors(cond_return)
|
| 316 |
+
|
| 317 |
+
|
| 318 |
+
@cond_op.py_impl(torch._C._functorch.TransformType.Vmap)
def cond_batch_rule(interpreter, pred, true_fn, false_fn, inputs):
    """vmap batching rule for `cond`.

    If the predicate itself is batched, both branches must be evaluated and
    combined elementwise with `torch.where`; otherwise the predicate is the
    same for the whole batch and `cond` can simply be re-invoked on vmapped
    branches.
    """
    assert isinstance(
        inputs, (list, tuple)
    ), "Cond inputs must be a list or tuple of tensors"
    assert all(
        isinstance(i, torch.Tensor) for i in inputs
    ), "Cond inputs must be a list of tensors"

    pred_ = get_unwrapped(pred) if is_batchedtensor(pred) else pred

    # unbatched tensors are not vmapped
    tensors, in_dims = zip(
        *[
            (get_unwrapped(t), maybe_get_bdim(t)) if is_batchedtensor(t) else (t, None)
            for t in inputs
        ]
    )

    if is_batchedtensor(pred):
        # prepend "pred" and vmap everything
        tensors = (pred_,) + tensors
        in_dims = (0,) + in_dims

        # Batched predicate: each batch element may pick a different branch,
        # so run both branches and select per element with torch.where.
        def fn(p, *args):
            t = true_fn(*args)
            f = false_fn(*args)
            return torch.where(p, t[0], f[0])

        with interpreter.lower():
            result = torch.vmap(fn, in_dims=in_dims)(*tensors)

    else:
        # predicate is known at this stage and it is a boolean expression or a
        # tensor with one element.
        true_fn = torch.vmap(true_fn, in_dims=in_dims)
        false_fn = torch.vmap(false_fn, in_dims=in_dims)

        with interpreter.lower():
            result = cond_op(pred, true_fn, false_fn, tensors)

    # Re-attach a batch dimension (dim 0) at this vmap level to every output.
    if not isinstance(result, tuple):
        result = (result,)
    lvl = interpreter.level()
    return tuple([_add_batch_dim(r, 0, lvl) for r in result])
|
valley/lib/python3.10/site-packages/torch/_higher_order_ops/effects.py
ADDED
|
@@ -0,0 +1,225 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
from enum import Enum
|
| 3 |
+
from typing import Any, Dict, Optional, Tuple
|
| 4 |
+
|
| 5 |
+
import torch
|
| 6 |
+
import torch.utils._pytree as pytree
|
| 7 |
+
from torch._C import DispatchKey
|
| 8 |
+
from torch._ops import HigherOrderOperator
|
| 9 |
+
from torch._subclasses.fake_tensor import FakeTensorMode
|
| 10 |
+
from torch.fx.experimental.proxy_tensor import (
|
| 11 |
+
disable_proxy_modes_tracing,
|
| 12 |
+
ProxyTorchDispatchMode,
|
| 13 |
+
track_tensor_tree,
|
| 14 |
+
)
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class _EffectType(Enum):
    """Kinds of side effects an operator can carry."""

    # Ops with this effect must keep their relative execution order.
    ORDERED = "Ordered"
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
# Registry mapping an effectful operator overload to its effect type.  Entries
# are either listed here explicitly or added dynamically by `get_effect_key`
# when an op is called with a ScriptObject argument.
SIDE_EFFECTS: Dict[torch._ops.OpOverload, _EffectType] = {
    torch.ops.aten._print.default: _EffectType.ORDERED,
}
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
def _register_effectful_op(op: torch._ops.OpOverload, effect: _EffectType):
    """Register *op* as carrying side effect *effect* in SIDE_EFFECTS.

    Re-registering the same op with the same effect is a no-op; registering a
    different effect for an already-registered op raises.
    """
    # Ops with aliasing cannot be wrapped by with_effects.
    assert isinstance(op, torch._ops.OpOverload) and not has_aliasing(op)
    if op in SIDE_EFFECTS and SIDE_EFFECTS[op] != effect:
        raise RuntimeError(
            f"Already registered effect type {SIDE_EFFECTS[op]} to op {op}, "
            f"trying to register a different effect type {effect}."
        )
    SIDE_EFFECTS[op] = effect
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
class WithEffects(HigherOrderOperator):
    """
    with_effects(token, op, args, kwargs) -> (new_token, op_results)

    This HOP helps ensure ordering between side effectful ops like prints or ops
    using torchbind objects. This is needed to ensure a traced graph from
    AOTAutograd is functional so that future optimization passes do not reorder
    these operators. This is done through threading "effect tokens" through the
    graph to enforce data dependence between side effectful ops.

    The tokens are basically dummy values (torch.tensor([])). We create a token
    per "effect type", which are enumerated in the _EffectType enum.
    """

    def __init__(self):
        super().__init__("with_effects")

    def __call__(
        self,
        token,
        op: torch._ops.OpOverload,
        *args: Tuple[Any, ...],
        **kwargs: Dict[str, Any],
    ) -> Tuple[Any, ...]:
        # Only non-aliasing ops that actually carry an effect may be wrapped;
        # anything else should be called directly, not through with_effects.
        assert isinstance(op, torch._ops.OpOverload)
        assert not has_aliasing(op), "Ops with aliasing is not supported"
        assert has_effects(op, args, kwargs)
        assert isinstance(kwargs, dict)
        return super().__call__(token, op, *args, **kwargs)
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
# Module-level singleton instance of the with_effects HOP.
with_effects = WithEffects()
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
def has_aliasing(op: torch._ops.OpOverload):
    """Return True if any argument or return in *op*'s schema carries alias info."""
    schema = op._schema
    return any(
        entry.alias_info is not None
        for entry in (*schema.arguments, *schema.returns)
    )
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
def has_effects(op, args, kwargs) -> bool:
    """Return True when *op* is a non-aliasing OpOverload with a known effect type."""
    # Skip over the profiler's RecordFunction as they should not show up in the graph
    _skip_ops = {torch.ops.profiler._record_function_exit._RecordFunction}
    if op in _skip_ops:
        return False

    # Early-out on each disqualifying condition instead of one big expression.
    if not isinstance(op, torch._ops.OpOverload):
        return False
    if has_aliasing(op):
        return False
    return get_effect_key(op, args, kwargs) is not None
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
def get_effect_key(op, args, kwargs) -> Optional[_EffectType]:
    """Look up (or discover) the effect type for *op*, or None if it has none."""
    registered = SIDE_EFFECTS.get(op)
    if registered is not None:
        return registered

    # A ScriptObject argument implies ordered side effects; memoize the
    # result so future lookups for this op skip scanning the args.
    if any(isinstance(arg, torch.ScriptObject) for arg in args):
        SIDE_EFFECTS[op] = _EffectType.ORDERED
        return _EffectType.ORDERED

    return None
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
@with_effects.py_impl(DispatchKey.CompositeExplicitAutograd)
def with_effects_dense(
    token: torch.Tensor,
    op: torch._ops.OpOverload,
    *args: Tuple[Any, ...],
    **kwargs: Dict[str, Any],
) -> Tuple[torch.Tensor, ...]:
    # Eager implementation: actually run the op, then emit a fresh empty
    # tensor as the new token, prepended to the op's result(s).
    out = op(*args, **kwargs)
    new_token = torch.tensor([])
    if isinstance(out, tuple):
        return (new_token, *out)
    return (new_token, out)
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
@with_effects.py_impl(FakeTensorMode)
def with_effects_fake(
    mode,
    token: torch.Tensor,
    op: torch._ops.OpOverload,
    *args: Tuple[Any, ...],
    **kwargs: Dict[str, Any],
) -> Tuple[torch.Tensor, ...]:
    # Reuse the dense implementation; entering `mode` makes the inner op call
    # produce fake tensors instead of performing real computation.
    with mode:
        result = with_effects_dense(token, op, *args, **kwargs)
        return result
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
@with_effects.py_impl(ProxyTorchDispatchMode)
def with_effects_proxy(
    mode,
    token: torch.Tensor,
    op: torch._ops.OpOverload,
    *args: Tuple[Any, ...],
    **kwargs: Dict[str, Any],
) -> Tuple[torch.Tensor, ...]:
    # With tracing disabled, just redispatch untouched.
    if not mode.enable_tracing:
        return with_effects(token, op, *args, **kwargs)

    # Compute the real (untraced) result first so proxies can be attached to it.
    with disable_proxy_modes_tracing():
        out = with_effects(token, op, *args, **kwargs)

    # Swap every tensor for its proxy and record a with_effects call_function
    # node in the graph.  Note that `op` is passed through as-is (it is an
    # OpOverload, not a tensor).
    proxy_token = mode.tracer.unwrap_proxy(token)
    proxy_args = pytree.tree_map(mode.tracer.unwrap_proxy, args)
    proxy_kwargs = pytree.tree_map(mode.tracer.unwrap_proxy, kwargs)

    out_proxy = mode.tracer.create_proxy(
        "call_function",
        with_effects,
        (proxy_token, op, *proxy_args),
        proxy_kwargs,
    )
    result = track_tensor_tree(out, out_proxy, constant=None, tracer=mode.tracer)
    return result
|
| 160 |
+
|
| 161 |
+
|
| 162 |
+
# Let the Autograd keys fall through: with_effects registers no autograd
# formula of its own, so dispatch continues past these keys.
with_effects.fallthrough(DispatchKey.AutogradCPU)
with_effects.fallthrough(DispatchKey.AutogradCUDA)
|
| 164 |
+
|
| 165 |
+
|
| 166 |
+
def handle_effects(
    allow_token_discovery: bool,
    tokens: Dict[_EffectType, torch.Tensor],
    op: torch._ops.OpOverload,
    args: Tuple[Any, ...],
    kwargs: Dict[str, Any],
) -> Any:
    """
    Args:
        allow_token_discovery: Whether or not we are discovering tokens. If this
            is true, we will create a token for every side effect type seen that
            does not have a token assigned yet. If this is false, the tokens
            should've all been created ahead of time, so we will error if there is
            no token mapping to every effect type.

        tokens: Map of effect type to tokens. This is to chain operators of the
            same effects together so that they do not get reordered in later
            optimization passes.
    """

    # Get a token. We can't do `tokens.get(op, torch.tensor([]))` because
    # this will create an empty tensor during proxy mode tracing if the token
    # doesn't exist. But the tokens should always exist during proxy mode tracing.
    key = get_effect_key(op, args, kwargs)
    assert key is not None
    if key not in tokens:
        assert (
            allow_token_discovery
        ), f"Could not find a token for effect {key} which came from the function {op}"
        tokens[key] = torch.tensor([])
    token = tokens[key]

    # Imported lazily (inside the function) to avoid an import cycle with
    # torch._subclasses.functional_tensor — presumably; TODO confirm.
    from torch._subclasses.functional_tensor import PythonFunctionalizeAPI

    ctx = PythonFunctionalizeAPI()

    # Unwrap functional tensors, call with_effects on the inner tensors, then
    # re-wrap the outputs (and the new token) on the way out.
    unwrapped_token = ctx.unwrap_tensors([token])[0]  # type: ignore[arg-type]
    unwrapped_args = ctx.unwrap_tensors(args)  # type: ignore[arg-type]
    unwrapped_kwargs = ctx.unwrap_tensors(kwargs)  # type: ignore[arg-type]
    with ctx.redispatch_to_next():
        (new_token, *unwrapped_outs) = with_effects(
            unwrapped_token, op, *unwrapped_args, **unwrapped_kwargs  # type: ignore[arg-type]
        )

    # Normalize the outputs to match the op's schema arity: no returns ->
    # None, single return -> the bare value, otherwise keep the list.
    if len(op._schema.returns) == 0:
        assert unwrapped_outs[0] is None
        unwrapped_outs = None  # type: ignore[assignment]
    elif len(op._schema.returns) == 1:
        assert len(unwrapped_outs) == 1
        unwrapped_outs = unwrapped_outs[0]
    else:
        assert len(unwrapped_outs) == len(op._schema.returns)

    # Add the newly created token into the tokens map for a following call to
    # use this token.
    wrapped_token = ctx.wrap_tensors(new_token)
    assert isinstance(wrapped_token, torch.Tensor)
    tokens[key] = wrapped_token

    return ctx.wrap_tensors(unwrapped_outs)  # type: ignore[arg-type]
|
valley/lib/python3.10/site-packages/torch/_higher_order_ops/flex_attention.py
ADDED
|
@@ -0,0 +1,681 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
from typing import Any, Callable, Tuple, Union
|
| 3 |
+
|
| 4 |
+
import torch
|
| 5 |
+
import torch.utils._pytree as pytree
|
| 6 |
+
from torch._C import DispatchKey
|
| 7 |
+
from torch._higher_order_ops.utils import (
|
| 8 |
+
_has_potential_branch_input_mutation,
|
| 9 |
+
autograd_not_implemented,
|
| 10 |
+
reenter_make_fx,
|
| 11 |
+
UnsupportedAliasMutationException,
|
| 12 |
+
)
|
| 13 |
+
from torch._ops import HigherOrderOperator
|
| 14 |
+
from torch._subclasses import FakeTensorMode
|
| 15 |
+
from torch.fx.experimental.proxy_tensor import (
|
| 16 |
+
make_fx,
|
| 17 |
+
ProxyTorchDispatchMode,
|
| 18 |
+
track_tensor_tree,
|
| 19 |
+
)
|
| 20 |
+
from torch.fx.graph_module import GraphModule
|
| 21 |
+
|
| 22 |
+
from torch.overrides import TorchFunctionMode
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def transform_getitem_args(x: torch.Tensor, index_args) -> Tuple[Any, ...]:
    """Normalize ``__getitem__`` arguments into the ``(tensor, list_of_indices)``
    shape expected by ``torch.ops.aten.index``.

    A tuple of indices is converted to a list, and a single (non-sequence)
    index is wrapped in a one-element list; an existing list passes through.
    """
    if isinstance(index_args, list):
        return (x, index_args)
    if isinstance(index_args, tuple):
        return (x, list(index_args))
    return (x, [index_args])
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
class TransformGetItemToIndex(TorchFunctionMode):
    """TorchFunctionMode that reroutes ``Tensor.__getitem__`` to ``aten.index``.

    score_mod functions may index a tensor with a scalar tensor (``A[q_idx]``).
    Today a scalar-tensor index is implicitly converted to a python scalar and
    a view is created. We do not want that behavior while running score_mod,
    so this mode overrides ``__getitem__`` wherever score_mod is executed.
    """

    def __torch_function__(self, func, types, args, kwargs=None):
        kwargs = kwargs or {}
        if func != torch.Tensor.__getitem__:
            return func(*args, **kwargs)
        return torch.ops.aten.index(*transform_getitem_args(*args))
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
class FlexAttentionHOP(HigherOrderOperator):
    """Higher-order op: ``(query, key, value, score_mod, *buffers) ->
    (output, logsumexp)``."""

    def __init__(self):
        super().__init__("flex_attention")

    def __call__(
        self,
        query: torch.Tensor,
        key: torch.Tensor,
        value: torch.Tensor,
        score_mod: Callable,
        *other_buffers: torch.Tensor,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        # Captured buffers must be plain tensors so they can be threaded
        # through the dispatcher.
        for buf in other_buffers:
            if not isinstance(buf, torch.Tensor):
                raise RuntimeError("Other buffers must be tensors.")
        return super().__call__(query, key, value, score_mod, *other_buffers)
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
# Singleton HOP instance used throughout this file; advertised under
# torch.ops.higher_order for printing/serialization purposes.
flex_attention = FlexAttentionHOP()
flex_attention.__module__ = "torch.ops.higher_order"
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
class FlexAttentionBackwardHOP(HigherOrderOperator):
    """Higher-order op computing grads of (query, key, value) for flex_attention.

    Takes the forward inputs/outputs plus the traced forward (``fw_graph``)
    and joint (``joint_graph``) graphs of score_mod.
    """

    def __init__(self):
        super().__init__("flex_attention_backward")

    def __call__(
        self,
        query: torch.Tensor,
        key: torch.Tensor,
        value: torch.Tensor,
        out: torch.Tensor,
        logsumexp: torch.Tensor,
        grad_out: torch.Tensor,
        fw_graph: Union[Callable, GraphModule],
        joint_graph: GraphModule,
        *other_buffers: torch.Tensor,
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        # Captured buffers must be plain tensors.
        for buf in other_buffers:
            if not isinstance(buf, torch.Tensor):
                raise RuntimeError("Other buffers must be tensors.")
        return super().__call__(
            query,
            key,
            value,
            out,
            logsumexp,
            grad_out,
            fw_graph,
            joint_graph,
            *other_buffers,
        )
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
# Singleton backward HOP instance; advertised under torch.ops.higher_order.
flex_attention_backward = FlexAttentionBackwardHOP()
flex_attention_backward.__module__ = "torch.ops.higher_order"
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
def math_attention(
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    score_mod: Callable,
    *other_buffers: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Eager reference implementation of flex_attention.

    ``score_mod`` is vectorized with one ``torch.vmap`` wrap per attention
    dimension (batch, head, m, n) and then applied to the raw score matrix.

    Args:
        query: The query tensor
        key: The key tensor
        value: The value tensor
        score_mod: The score_mod function
        other_buffers: Other buffers that are passed to the score_mod function

    Returns:
        A tuple of (attention output, logsumexp of the modified scores).
    """
    # Compute in fp32 unless the inputs are already fp64.
    working_precision = torch.float64 if query.dtype == torch.float64 else torch.float32

    scores = (query @ key.transpose(-2, -1)).to(dtype=working_precision)

    # Index vectors for each of the four score dimensions.
    b, h, m, n = (
        torch.arange(0, scores.size(dim), device=scores.device) for dim in range(4)
    )

    # Vectorize score_mod one dimension at a time (innermost n first, then m,
    # h, b); captured buffers are broadcast (in_dim None) at every level.
    buffer_dims = (None,) * len(other_buffers)
    for in_dims in (
        (0, None, None, None, 0),
        (0, None, None, 0, None),
        (0, None, 0, None, None),
        (0, 0, None, None, None),
    ):
        score_mod = torch.vmap(score_mod, in_dims=in_dims + buffer_dims)

    # todo: We wouldn't need these overrides in this file if Dynamo always did
    # the rewriting.
    with TransformGetItemToIndex():
        scores = score_mod(scores, b, h, m, n, *other_buffers).to(working_precision)

    # TODO Unconditionally return logsumexp for backwards
    logsumexp = scores.logsumexp(dim=-1)
    probs = scores.softmax(dim=-1)

    return probs.to(query.dtype) @ value, logsumexp
|
| 149 |
+
|
| 150 |
+
|
| 151 |
+
@flex_attention.py_impl(DispatchKey.CompositeExplicitAutograd)
def sdpa_dense(
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    score_mod: Callable,
    *other_buffers: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Dense (eager) kernel: defer to math_attention and return a contiguous
    output alongside the logsumexp."""
    out, lse = math_attention(query, key, value, score_mod, *other_buffers)
    return out.contiguous(), lse
|
| 162 |
+
|
| 163 |
+
|
| 164 |
+
def trace_flex_attention(
    proxy_mode: ProxyTorchDispatchMode,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    score_mod: Callable,
    *other_buffers: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Trace the flex_attention operator into the current proxy graph.

    score_mod is re-traced via make_fx with scalar example values, and the
    resulting GraphModule is registered on the root tracer under a fresh
    "sdpa_score*" name. Inductor later reads that module to inline score_mod
    into its triton template.
    """
    example_out = flex_attention(query, key, value, score_mod, *other_buffers)
    # Scalar placeholders: (score, b, h, m, n).
    scalar_args = [
        torch.zeros((), dtype=query.dtype, requires_grad=query.requires_grad)
    ] + [torch.zeros((), dtype=torch.int) for _ in range(4)]
    with TransformGetItemToIndex():
        traced_score = reenter_make_fx(score_mod)(*scalar_args, *other_buffers)
    qualname = proxy_mode.tracer.get_fresh_qualname("sdpa_score")
    proxy_mode.tracer.root.register_module(qualname, traced_score)
    hop_args = (query, key, value, traced_score, *other_buffers)
    proxies = pytree.tree_map(proxy_mode.tracer.unwrap_proxy, hop_args)
    out_proxy = proxy_mode.tracer.create_proxy(
        "call_function", flex_attention, proxies, {}
    )
    return track_tensor_tree(
        example_out, out_proxy, constant=None, tracer=proxy_mode.tracer
    )
|
| 194 |
+
|
| 195 |
+
|
| 196 |
+
@flex_attention.py_impl(ProxyTorchDispatchMode)
def flex_attention_proxy_torch_dispatch_mode(
    mode: ProxyTorchDispatchMode,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    score_mod: Callable,
    *other_buffers: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Proxy-mode dispatch: trace the HOP when tracing is enabled, otherwise
    call it directly."""
    assert mode is not None, "Mode should always be enabled for python fallback key"
    if not mode.enable_tracing:
        return flex_attention(query, key, value, score_mod, *other_buffers)
    return trace_flex_attention(mode, query, key, value, score_mod, *other_buffers)
|
| 210 |
+
|
| 211 |
+
|
| 212 |
+
@flex_attention.py_functionalize_impl
def flex_attention_functionalize(
    ctx: torch._subclasses.functional_tensor.BaseFunctionalizeAPI,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    score_mod: Callable,
    *other_buffers: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Defines the functionalization rules for the flex_attention operator.

    Right now we are unwrapping each tensor and then redispatching to the next,
    however we want to guard against any mutations in the score_mod function,
    to the other_buffers since those are free variables.
    """
    query_unwrapped = ctx.unwrap_tensors(query)
    key_unwrapped = ctx.unwrap_tensors(key)
    value_unwrapped = ctx.unwrap_tensors(value)
    other_buffers_unwrapped = ctx.unwrap_tensors(other_buffers)

    # Appease the mypy overlords
    assert isinstance(query_unwrapped, torch.Tensor)
    assert isinstance(key_unwrapped, torch.Tensor)
    assert isinstance(value_unwrapped, torch.Tensor)
    assert isinstance(other_buffers_unwrapped, tuple)
    assert all(isinstance(item, torch.Tensor) for item in other_buffers_unwrapped)

    # Scalar example inputs (score, b, h, m, n) plus the captured buffers,
    # used only to probe score_mod for mutations.
    example_vals = (
        [torch.zeros((), dtype=query.dtype)]
        + [torch.zeros((), dtype=torch.int) for _ in range(4)]
        + list(other_buffers_unwrapped)
    )
    with ctx.redispatch_to_next() as m:
        functional_score_mod = ctx.functionalize(score_mod)
        pre_dispatch = hasattr(ctx, "mode") and ctx.mode.pre_dispatch
        with TransformGetItemToIndex():
            mutates = _has_potential_branch_input_mutation(
                functional_score_mod, example_vals, pre_dispatch
            )
        # We only care about mutations of existing buffers since we can't replay these.
        # However, we can just error if anything is detected
        if mutates:
            raise UnsupportedAliasMutationException("Mutations detected in score_mod")

        out = flex_attention(
            query_unwrapped,
            key_unwrapped,
            value_unwrapped,
            functional_score_mod,
            *other_buffers_unwrapped,
        )
    return ctx.wrap_tensors(out)  # type: ignore[return-value, arg-type]
|
| 264 |
+
|
| 265 |
+
|
| 266 |
+
@flex_attention.py_impl(FakeTensorMode)
def flex_attention_fake_tensor_mode(
    mode: FakeTensorMode,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    score_mod: Callable,
    *other_buffers: Tuple[torch.Tensor, ...],
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Fake-tensor meta: the output matches query's shape/dtype, and the
    logsumexp drops the head dimension and is always fp32."""
    with mode:
        batch_size, num_heads, seq_len_q, _ = query.shape
        logsumexp = query.new_empty(
            batch_size, num_heads, seq_len_q, dtype=torch.float32
        )
        attn_out = torch.empty_like(query, memory_format=torch.contiguous_format)
        return attn_out, logsumexp
|
| 281 |
+
|
| 282 |
+
|
| 283 |
+
# ---------------------------- Autograd Implementation ----------------------------
|
| 284 |
+
def create_fw_bw_graph(score_mod, index_values, other_buffers):
    """Build the forward callable and the traced joint (fw+bw) graph for score_mod.

    Args:
        score_mod: user callable ``(score, b, h, m, n, *buffers) -> score``.
        index_values: example scalar tensors ``(score, b, h, m, n)`` used to
            drive the tracing.  NOTE(review): presumably all scalar tensors —
            confirm against callers.
        other_buffers: extra tensors captured by ``score_mod``.

    Returns:
        ``(score_mod, joint_graph)`` — the forward callable unchanged, and a
        GraphModule computing grads of score_mod's differentiable inputs.
    """
    # See Note:[HOP create fw_bw graph]

    # All of these imports need to be here in order to avoid circular dependencies
    from torch._dispatch.python import suspend_functionalization
    from torch._functorch.aot_autograd import AOTConfig, create_joint
    from torch._subclasses.fake_tensor import FakeTensor, FakeTensorMode

    from torch._subclasses.functional_tensor import disable_functional_mode
    from torch.fx.experimental.proxy_tensor import disable_proxy_modes_tracing

    # Minimal AOTConfig: only create_joint is used, so the compiler/partition
    # hooks are deliberately left unset.
    dummy_aot_config = AOTConfig(
        fw_compiler=None,  # type: ignore[arg-type]
        bw_compiler=None,  # type: ignore[arg-type]
        partition_fn=None,  # type: ignore[arg-type]
        decompositions={},
        num_params_buffers=0,
        aot_id=0,
        keep_inference_input_mutations=False,
    )

    with suspend_functionalization(), disable_functional_mode():
        with disable_proxy_modes_tracing():

            def _from_fun(t):
                # Fresh empty tensor carrying t's metadata; no real data is
                # needed for tracing.
                return torch.empty_strided(
                    t.size(),
                    t.stride(),
                    device=t.device,
                    dtype=t.dtype,
                    requires_grad=t.requires_grad,
                )

            # If someone runs this hop under the default compiler backend ("eager")
            # Then this path will be run with the actual user inputs. We convert them
            # to fake tensors in order to not perform any actual compute.
            from torch._guards import detect_fake_mode

            fake_mode = detect_fake_mode(index_values)
            if fake_mode is None:
                fake_mode = FakeTensorMode(allow_non_fake_inputs=True)

            with fake_mode:
                unwrapped_score_mod_indexes = pytree.tree_map(_from_fun, index_values)
                unwrapped_other_buffers = pytree.tree_map(_from_fun, other_buffers)

            assert all(isinstance(t, FakeTensor) for t in unwrapped_score_mod_indexes)
            assert all(isinstance(t, FakeTensor) for t in unwrapped_other_buffers)

            # Run score_mod once on fakes to discover the output's metadata.
            example_flat_out = pytree.tree_map(
                _from_fun,
                score_mod(*unwrapped_score_mod_indexes, *unwrapped_other_buffers),
            )
            if not isinstance(example_flat_out, torch.Tensor):
                raise RuntimeError(
                    "Expected output of score_mod to be a tensor."
                    f"Got type {type(example_flat_out)}."
                )
            example_grad = _from_fun(example_flat_out)

            def joint_f(score, b, h, m, n, example_grad, *other_buffers):
                # Wrap score_mod in the (outputs, requires_grad-masks) form
                # that create_joint expects.
                def fw_with_masks(*args):
                    fw_out = score_mod(*args)
                    out_requires_grad = fw_out.requires_grad
                    return ((fw_out,), (out_requires_grad,))

                joint = create_joint(fw_with_masks, aot_config=dummy_aot_config)
                args = [score, b, h, m, n] + list(other_buffers)
                # create_joint only accepts tangents for outputs that require grad.
                optional_grad = [example_grad] if example_grad.requires_grad else []
                _, grads = joint(args, optional_grad)

                return grads

            joint_graph = make_fx(joint_f)(
                *unwrapped_score_mod_indexes, example_grad, *unwrapped_other_buffers
            )
    return score_mod, joint_graph
|
| 361 |
+
|
| 362 |
+
|
| 363 |
+
class FlexAttentionAutogradOp(torch.autograd.Function):
    """autograd.Function bridging flex_attention's forward and backward HOPs.

    The traced fw/joint graphs are stashed directly on ctx — they are not
    tensors, so they cannot go through save_for_backward.
    """

    @staticmethod
    def forward(
        ctx, query, key, value, fw_graph, joint_graph, *other_buffers
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        assert not any(
            buffer.requires_grad for buffer in other_buffers
        ), "Captured buffers that require grad are not yet supported."
        ctx._fw_graph = fw_graph
        ctx._joint_graph = joint_graph
        # Dispatch below Autograd so the HOP's dense/fake implementations run.
        with torch._C._AutoDispatchBelowAutograd():
            out, logsumexp = flex_attention(query, key, value, fw_graph, *other_buffers)

        ctx.save_for_backward(query, key, value, out, logsumexp, *other_buffers)
        return out, logsumexp

    @staticmethod
    def backward(ctx, grad_out, logsumexp_grad):
        query, key, value, out, logsumexp, *other_buffers = ctx.saved_tensors
        grad_query, grad_key, grad_value = flex_attention_backward(
            query,
            key,
            value,
            out,
            logsumexp,
            grad_out,
            ctx._fw_graph,
            ctx._joint_graph,
            *other_buffers,
        )
        # No grads for fw_graph/joint_graph, and (asserted in forward) none
        # for the captured buffers.
        return grad_query, grad_key, grad_value, *([None] * (2 + len(other_buffers)))
|
| 400 |
+
|
| 401 |
+
|
| 402 |
+
@flex_attention.py_impl(DispatchKey.Autograd)
def flex_attention_autograd(
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    score_mod: Callable,
    *other_buffers: Tuple[torch.Tensor, ...],
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Autograd entry point: pre-trace score_mod's forward/joint graphs (only
    when a backward pass is actually possible) and hand off to the
    autograd.Function."""
    with TransformGetItemToIndex():
        input_requires_grad = any(t.requires_grad for t in (query, key, value))
        if torch.is_grad_enabled() and input_requires_grad:
            # Scalar placeholders (score, b, h, m, n) used purely for tracing.
            example_vals = [
                torch.zeros((), dtype=query.dtype, requires_grad=input_requires_grad)
            ] + [torch.zeros((), dtype=torch.int) for _ in range(4)]
            fw_graph, bw_graph = create_fw_bw_graph(
                score_mod, example_vals, other_buffers
            )
        else:
            # No grad needed: run score_mod as-is, no joint graph.
            fw_graph, bw_graph = score_mod, None
        out, lse = FlexAttentionAutogradOp.apply(
            query, key, value, fw_graph, bw_graph, *other_buffers
        )
    return out, lse
|
| 425 |
+
|
| 426 |
+
|
| 427 |
+
# ---------------------------- Backward HOP Implementation ----------------------------
|
| 428 |
+
|
| 429 |
+
|
| 430 |
+
@flex_attention_backward.py_impl(DispatchKey.CompositeExplicitAutograd)
def sdpa_dense_backward(
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    out: torch.Tensor,
    logsumexp: torch.Tensor,
    grad_out: torch.Tensor,
    fw_graph: Callable,  # GraphModule type hint?
    joint_graph: Callable,
    *other_buffers: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    """Eager/composite backward: recompute the modified scores, rebuild the
    softmax from the saved logsumexp, then backprop through score_mod via the
    traced joint graph."""
    working_precision = torch.float64 if query.dtype == torch.float64 else torch.float32
    scores = (query @ key.transpose(-2, -1)).to(working_precision)

    # Index vectors for each of the four score dimensions.
    b, h, m, n = (
        torch.arange(0, scores.size(dim), device=scores.device) for dim in range(4)
    )

    buffer_dims = (None,) * len(other_buffers)

    # Vectorize the forward score_mod over (b, h, m, n); buffers broadcast.
    score_mod = fw_graph
    for in_dims in (
        (0, None, None, None, 0),
        (0, None, None, 0, None),
        (0, None, 0, None, None),
        (0, 0, None, None, None),
    ):
        score_mod = torch.vmap(score_mod, in_dims=in_dims + buffer_dims)

    with TransformGetItemToIndex():
        post_mod_scores = score_mod(scores, b, h, m, n, *other_buffers).to(
            working_precision
        )

    # exp(score - logsumexp) reconstructs the softmax without re-reducing.
    softmax_scores = torch.exp(post_mod_scores - logsumexp.unsqueeze(-1))

    grad_value = softmax_scores.to(query.dtype).transpose(-2, -1) @ grad_out

    grad_softmax_scores = grad_out @ value.transpose(-2, -1)

    sum_scores = torch.sum(out * grad_out, -1, keepdim=True)
    grad_score_mod = softmax_scores * (grad_softmax_scores - sum_scores)

    # Gradient of the inline score_mod function w.r.t. the scores: vectorize
    # the joint graph.  The incoming grad is mapped together with the scores,
    # and only the score-grad output is batched (out_dims 0); all other grads
    # stay unbatched.
    out_dims = [0, None, None, None, None] + [None] * len(other_buffers)
    joint_score_mod = joint_graph
    for in_dims in (
        (0, None, None, None, 0, 0),
        (0, None, None, 0, None, 0),
        (0, None, 0, None, None, 0),
        (0, 0, None, None, None, 0),
    ):
        joint_score_mod = torch.vmap(
            joint_score_mod, in_dims=in_dims + buffer_dims, out_dims=out_dims
        )
    with TransformGetItemToIndex():
        grad_scores, *_ = joint_score_mod(
            scores, b, h, m, n, grad_score_mod, *other_buffers
        )
    grad_scores = grad_scores.to(query.dtype)

    grad_query = grad_scores @ key
    grad_key = grad_scores.transpose(-2, -1) @ query
    return grad_query.contiguous(), grad_key.contiguous(), grad_value.contiguous()
|
| 502 |
+
|
| 503 |
+
|
| 504 |
+
def trace_flex_attention_backward(
    proxy_mode: ProxyTorchDispatchMode,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    out: torch.Tensor,
    logsumexp: torch.Tensor,
    grad_out: torch.Tensor,
    fw_graph: Union[Callable, GraphModule],
    joint_graph: GraphModule,
    *other_buffers: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    """Trace flex_attention_backward into the current proxy graph.

    The forward and joint graphs already exist from the forward pass; we
    re-trace them with scalar example inputs, attach both to the root tracer,
    and emit a proxy node for the backward HOP.
    """
    example_out = flex_attention_backward(
        query,
        key,
        value,
        out,
        logsumexp,
        grad_out,
        fw_graph,
        joint_graph,
        *other_buffers,
    )

    # Scalar placeholders: (score, b, h, m, n) for fw; joint additionally
    # takes an incoming grad scalar.
    fw_example_vals = [
        torch.zeros((), dtype=query.dtype, requires_grad=query.requires_grad)
    ] + [torch.zeros((), dtype=torch.int) for _ in range(4)]
    bw_example_vals = fw_example_vals + [torch.zeros((), dtype=query.dtype)]
    with TransformGetItemToIndex():
        fw_graph = reenter_make_fx(fw_graph)(*fw_example_vals, *other_buffers)
        joint_graph = reenter_make_fx(joint_graph)(*bw_example_vals, *other_buffers)
    proxy_mode.tracer.root.register_module("fw_graph", fw_graph)
    proxy_mode.tracer.root.register_module("joint_graph", joint_graph)
    hop_args = (
        query,
        key,
        value,
        out,
        logsumexp,
        grad_out,
        fw_graph,
        joint_graph,
        *other_buffers,
    )
    proxies = pytree.tree_map(proxy_mode.tracer.unwrap_proxy, hop_args)
    out_proxy = proxy_mode.tracer.create_proxy(
        "call_function",
        flex_attention_backward,
        proxies,
        {},
        name="flex_attention_backward",
    )
    return track_tensor_tree(
        example_out, out_proxy, constant=None, tracer=proxy_mode.tracer
    )
|
| 560 |
+
|
| 561 |
+
|
| 562 |
+
@flex_attention_backward.py_impl(ProxyTorchDispatchMode)
def flex_attention_backward_proxy_torch_dispatch_mode(
    mode: ProxyTorchDispatchMode,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    out: torch.Tensor,
    logsumexp: torch.Tensor,
    grad_out: torch.Tensor,
    fw_graph: Union[Callable, GraphModule],
    joint_graph: GraphModule,
    *other_buffers: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    """Proxy-mode dispatch for the backward HOP: trace when tracing is
    enabled, otherwise fall through to a plain call."""
    assert mode is not None, "Mode should always be enabled for python fallback key"
    hop_args = (
        query,
        key,
        value,
        out,
        logsumexp,
        grad_out,
        fw_graph,
        joint_graph,
        *other_buffers,
    )
    if mode.enable_tracing:
        return trace_flex_attention_backward(mode, *hop_args)
    return flex_attention_backward(*hop_args)
|
| 601 |
+
|
| 602 |
+
|
| 603 |
+
@flex_attention_backward.py_functionalize_impl
def flex_attention_backward_functionalize(
    ctx: torch._subclasses.functional_tensor.BaseFunctionalizeAPI,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    out: torch.Tensor,
    logsumexp: torch.Tensor,
    grad_out: torch.Tensor,
    fw_graph: Union[Callable, GraphModule],
    joint_graph: GraphModule,
    *other_buffers: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    """Defines the functionalization rules for the flex_attention operator.

    Right now we are unwrapping each tensor and then redispatching to the next,
    since we know that the forward score mod function is assured to be free of mutations
    to the other_buffers, we skip that mutate check and go straight to redispatching.
    """
    query_unwrapped = ctx.unwrap_tensors(query)
    key_unwrapped = ctx.unwrap_tensors(key)
    value_unwrapped = ctx.unwrap_tensors(value)
    out_unwrapped = ctx.unwrap_tensors(out)
    logsumexp_unwrapped = ctx.unwrap_tensors(logsumexp)
    grad_out_unwrapped = ctx.unwrap_tensors(grad_out)
    other_buffers_unwrapped = ctx.unwrap_tensors(other_buffers)

    # Appease the mypy overlords
    assert isinstance(query_unwrapped, torch.Tensor)
    assert isinstance(key_unwrapped, torch.Tensor)
    assert isinstance(value_unwrapped, torch.Tensor)
    assert isinstance(out_unwrapped, torch.Tensor)
    assert isinstance(logsumexp_unwrapped, torch.Tensor)
    assert isinstance(grad_out_unwrapped, torch.Tensor)
    assert isinstance(other_buffers_unwrapped, tuple)
    assert all(isinstance(item, torch.Tensor) for item in other_buffers_unwrapped)

    with ctx.redispatch_to_next() as m:
        # Functionalize both traced graphs so the redispatched call sees
        # functional callables.
        functional_fw_graph = ctx.functionalize(fw_graph)
        functional_joint_graph = ctx.functionalize(joint_graph)

        grad_query, grad_key, grad_value = flex_attention_backward(
            query_unwrapped,
            key_unwrapped,
            value_unwrapped,
            out_unwrapped,
            logsumexp_unwrapped,
            grad_out_unwrapped,
            functional_fw_graph,  # type: ignore[arg-type]
            functional_joint_graph,  # type: ignore[arg-type]
            *other_buffers_unwrapped,
        )

    return ctx.wrap_tensors((grad_query, grad_key, grad_value))  # type: ignore[return-value,arg-type]
|
| 657 |
+
|
| 658 |
+
|
| 659 |
+
@flex_attention_backward.py_impl(FakeTensorMode)
def flex_attention_backward_fake_tensor_mode(
    mode: FakeTensorMode,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    out: torch.Tensor,
    logsumexp: torch.Tensor,
    grad_out: torch.Tensor,
    fw_graph: Union[Callable, GraphModule],
    joint_graph: GraphModule,
    *other_buffers: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    """Fake-tensor meta for the backward: each grad mirrors its input's
    shape/dtype, produced contiguous."""
    with mode:
        grad_query, grad_key, grad_value = (
            torch.empty_like(t, memory_format=torch.contiguous_format)
            for t in (query, key, value)
        )
        return grad_query, grad_key, grad_value
|
| 677 |
+
|
| 678 |
+
|
| 679 |
+
# The backward HOP has no autograd rule of its own (double backward is
# unsupported); register a deferred "not implemented" error instead.
flex_attention_backward.py_impl(DispatchKey.Autograd)(
    autograd_not_implemented(flex_attention_backward, deferred_error=True)
)
|
valley/lib/python3.10/site-packages/torch/_higher_order_ops/map.py
ADDED
|
@@ -0,0 +1,351 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import torch
|
| 3 |
+
import torch.utils._pytree as pytree
|
| 4 |
+
from torch._C import DispatchKey
|
| 5 |
+
from torch._dispatch.python import suspend_functionalization
|
| 6 |
+
from torch._functorch.aot_autograd import AOTConfig, create_joint, from_fun
|
| 7 |
+
|
| 8 |
+
from torch._higher_order_ops.utils import (
|
| 9 |
+
_has_potential_branch_input_alias,
|
| 10 |
+
_has_potential_branch_input_mutation,
|
| 11 |
+
reenter_make_fx,
|
| 12 |
+
UnsupportedAliasMutationException,
|
| 13 |
+
)
|
| 14 |
+
from torch._ops import HigherOrderOperator
|
| 15 |
+
from torch._subclasses.fake_tensor import FakeTensorMode
|
| 16 |
+
from torch._subclasses.functional_tensor import (
|
| 17 |
+
disable_functional_mode,
|
| 18 |
+
FunctionalTensor,
|
| 19 |
+
)
|
| 20 |
+
from torch.fx.experimental.proxy_tensor import (
|
| 21 |
+
disable_proxy_modes_tracing,
|
| 22 |
+
make_fx,
|
| 23 |
+
ProxyTorchDispatchMode,
|
| 24 |
+
track_tensor_tree,
|
| 25 |
+
)
|
| 26 |
+
from torch.multiprocessing.reductions import StorageWeakRef
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
# TODO: We add this to prevent dymamo from tracing into map_wrapper,
|
| 30 |
+
# remove the wrapper call when it's ready.
|
| 31 |
+
class MapWrapper(HigherOrderOperator):
    # Thin HOP entry point: calling ``map`` forwards straight to map_wrapper,
    # which validates the mapped inputs and flattens pytrees before hitting
    # the real map_impl HigherOrderOperator.
    def __call__(self, xs, *args):
        return map_wrapper(xs, *args)
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
map = MapWrapper("map")
|
| 37 |
+
map_impl = HigherOrderOperator("map_impl")
|
| 38 |
+
|
| 39 |
+
dummy_aot_config = AOTConfig(
|
| 40 |
+
fw_compiler=None, # type: ignore[arg-type]
|
| 41 |
+
bw_compiler=None, # type: ignore[arg-type]
|
| 42 |
+
partition_fn=None, # type: ignore[arg-type]
|
| 43 |
+
decompositions={},
|
| 44 |
+
num_params_buffers=0,
|
| 45 |
+
aot_id=0,
|
| 46 |
+
keep_inference_input_mutations=False,
|
| 47 |
+
)
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
def create_fw_bw_graph(f, num_mapped_args, *args):
    """Trace forward and joint (forward+backward) fx graphs for ``f``.

    ``args`` is the concatenation of the mapped inputs (first
    ``num_mapped_args`` entries) and the positional extras broadcast to every
    map iteration.  Returns ``(fw_graph, joint_graph)`` where both are
    ``make_fx`` GraphModules traced on a single unstacked slice of the mapped
    inputs.
    """
    mapped_xs = args[:num_mapped_args]
    pos_args = args[num_mapped_args:]

    # Note:[HOP create fw_bw graph] We create "clean" environments for make_fx by suspending all dispatch keys
    # between Autograd and Python key. Currently, we only suspend functionalization but more can be
    # added when required. Will encounter two problems if we don't suspend functionalization:
    #
    # 1. make_fx fails to capture operations on input: the inputs are wrapped as _to_functional_tensor_wrapper,
    # but they will be unwrapped before entering ProxyTorchDispatchMode as part of the dispatching.
    # However, it's the outside wrapper that tracer creates proxies for. This casuses tracer fail to
    # fetch the proxy for the inputs and fail to capture any operations on them.
    #
    # 2. make_fx fails to capture output: the outputs after ProxyTorchDispatchMode are further
    # wrapped as FunctionalTensorWrapper in Functionalize key after return. However, the tracer
    # only associates the inner tensor with proxy in ProxyTorchDispatchMode. Therefore,
    # when creating the output node, it fails to associate the wrapped tensor with its proxy.
    # Instead, it will create _tensor_constant as output.

    with suspend_functionalization(), disable_functional_mode():
        with disable_proxy_modes_tracing():

            def _from_fun(t):
                # Materialize a fresh, non-functional tensor with the same
                # metadata (size/stride/dtype/requires_grad) as ``t`` so that
                # tracing below only ever sees plain tensors.
                if isinstance(t, torch.Tensor):
                    if t.dtype != torch.bool:
                        return torch.empty_strided(
                            t.size(),
                            t.stride(),
                            dtype=t.dtype,
                            requires_grad=t.requires_grad,
                        )
                    else:
                        # clone of a functional tensor produces a functional tensor
                        # but we want to avoid it so we clone a non-functional version
                        maybe_unfunc_t = t
                        if isinstance(t, FunctionalTensor):
                            torch._sync(t)
                            maybe_unfunc_t = from_fun(t)
                        elif torch._is_functional_tensor(t):
                            # need to handle both types of functionalization here:
                            # these are the tensors that came from the user,
                            # which could be either FunctionalTensorWrapper or FunctionalTensor
                            torch._sync(t)
                            maybe_unfunc_t = torch._from_functional_tensor(t)
                        return maybe_unfunc_t.clone()
                return t

            unwrapped_mapped_xs = pytree.tree_map(_from_fun, mapped_xs)
            # Trace on a single slice along the leading (mapped) dimension.
            example_xs = _unstack_pytree(unwrapped_mapped_xs)[0]

            example_pos_args = [
                _from_fun(arg) if isinstance(arg, torch.Tensor) else arg
                for arg in pos_args
            ]
            example_flat_out = pytree.tree_map(
                _from_fun, f(*example_xs, *example_pos_args)
            )
            if any(
                not isinstance(out, torch.Tensor)
                for out in example_flat_out
                if out is not None
            ):
                raise RuntimeError(
                    "Expect outputs of map only contains tensors or None. "
                    f"Got types {[type(out) for out in example_flat_out]}."
                )
            # Example incoming gradients share metadata with the outputs.
            example_grad = [_from_fun(out) for out in example_flat_out]

            fw_graph = make_fx(f)(*example_xs, *example_pos_args)

            def joint_f(*example_args):
                # Joint function signature: (mapped inputs ++ output grads,
                # then positional extras).  Returns gradients w.r.t. inputs.
                joint_mapped_args = example_args[:joint_num_mapped]
                args = example_args[joint_num_mapped:]

                mapped_input = joint_mapped_args[:num_mapped_args]
                mapped_grads = joint_mapped_args[num_mapped_args:]

                def fw_with_masks(*args):
                    # create_joint expects (outputs, requires-grad mask).
                    fw_out = f(*args)
                    return fw_out, [
                        True
                        if isinstance(ret, torch.Tensor) and ret.requires_grad
                        else False
                        for ret in fw_out
                    ]

                joint = create_joint(fw_with_masks, aot_config=dummy_aot_config)
                _, grads = joint(
                    list(mapped_input) + list(args),
                    [
                        grad
                        for grad in mapped_grads
                        if grad is not None and grad.requires_grad
                    ],
                )

                # In order to keep map functional for backward graph,
                # we clone outputs that are aliasing inputs
                input_storage = {
                    StorageWeakRef(arg._typed_storage())
                    for arg in example_args
                    if isinstance(arg, torch.Tensor)
                }

                def maybe_clone(t):
                    if (
                        isinstance(t, torch.Tensor)
                        and StorageWeakRef(t._typed_storage()) in input_storage
                    ):
                        return t.clone()
                    return t

                return pytree.tree_map(maybe_clone, grads)

            joint_num_mapped = len(example_grad) + len(example_xs)
            joint_graph = make_fx(joint_f)(*example_xs, *example_grad, *example_pos_args)
            return fw_graph, joint_graph
|
| 167 |
+
|
| 168 |
+
|
| 169 |
+
def map_wrapper(f, xs, *args):
    """Validate and flatten ``xs``, then dispatch to ``map_impl``.

    ``f`` is applied to each slice of ``xs`` along the leading dimension; the
    extra ``args`` are passed unchanged to every invocation.  The flat outputs
    from map_impl are unflattened back into the caller's pytree structure.

    Raises:
        RuntimeError: if any leaf of ``xs`` is not a Tensor, the leading
            dimension is 0, or leading dimension sizes disagree.
    """
    flat_xs, xs_spec = pytree.tree_flatten(xs)
    if not all(isinstance(t, torch.Tensor) for t in flat_xs):
        raise RuntimeError(f"Mapped xs can only consist of tensors. Got xs {flat_xs}.")

    num_mapped_args = len(flat_xs)
    shapes = [xs.shape for xs in flat_xs]
    leading_dim_size = shapes[0][0]
    if leading_dim_size == 0:
        raise RuntimeError("Leading dimensions of mapped xs cannot be 0.")

    if any(cur_shape[0] != leading_dim_size for cur_shape in shapes):
        raise RuntimeError(
            f"Leading dimensions of mapped xs must be consistent. Got shapes {shapes}."
        )

    # Written by flat_fn below (via nonlocal) so the flat outputs returned by
    # map_impl can be unflattened into the user's output structure.
    out_spec = None

    def flat_fn(*flat_args):
        # Rebuild the pytree the user's ``f`` expects, call it, and record the
        # structure of its output.
        xs = pytree.tree_unflatten(list(flat_args[:num_mapped_args]), xs_spec)
        unflattened_out = f(xs, *flat_args[num_mapped_args:])
        flat_out, tmp_out_spec = pytree.tree_flatten(unflattened_out)

        nonlocal out_spec
        out_spec = tmp_out_spec
        return flat_out

    return pytree.tree_unflatten(
        map_impl(flat_fn, flat_xs, args), out_spec  # type: ignore[arg-type]
    )
|
| 199 |
+
|
| 200 |
+
|
| 201 |
+
class MapAutogradOp(torch.autograd.Function):
    # Bridges map_impl with autograd: forward maps the traced fw_graph below
    # the Autograd key; backward maps the traced joint graph over the saved
    # inputs together with the incoming output gradients.
    @staticmethod
    def forward(ctx, fw_graph, joint_graph, num_mapped_args, *flat_args):
        ctx.save_for_backward(*flat_args)
        ctx._joint_graph = joint_graph
        ctx._num_mapped_args = num_mapped_args
        # Redispatch below Autograd so running fw_graph does not re-enter
        # this autograd.Function.
        with torch._C._AutoDispatchBelowAutograd():
            return (
                *map_impl(
                    fw_graph, flat_args[:num_mapped_args], flat_args[num_mapped_args:]
                ),
            )

    @staticmethod
    def backward(ctx, *flat_grads):
        fw_args = ctx.saved_tensors
        fw_mapped_args = fw_args[: ctx._num_mapped_args]
        pos_args = fw_args[ctx._num_mapped_args :]

        # The joint graph's mapped arguments are (mapped inputs ++ grads);
        # the positional extras are passed through unchanged.
        grads = map_impl(
            ctx._joint_graph,
            fw_mapped_args + flat_grads,
            pos_args,
        )
        # Leading Nones match forward's non-tensor arguments
        # (fw_graph, joint_graph, num_mapped_args).
        return None, None, None, *grads
|
| 226 |
+
|
| 227 |
+
|
| 228 |
+
def trace_map(proxy_mode, func_overload, f, xs, pos_args):
    """Record a ``map_impl`` call_function node in the proxy tracer's graph.

    Traces the body ``f`` once on a single slice of ``xs``, registers the
    resulting subgraph on the tracer root, and tracks example outputs
    expanded back to the full leading dimension.
    """
    leading_dim_size = xs[0].shape[0]

    # Trace the body on one unstacked slice of the mapped inputs.
    example_input = _unstack_pytree(xs)[0]
    body_graph = f

    body_graph = reenter_make_fx(body_graph)(*example_input, *pos_args)

    next_name = proxy_mode.tracer.get_fresh_qualname("body_graph_")

    proxy_mode.tracer.root.register_module(next_name, body_graph)

    # Produce example outputs without recording nodes for them.
    with disable_proxy_modes_tracing():
        example_outs = body_graph(*example_input, *pos_args)

        def expand_tensor(t):
            # Per-slice outputs gain the mapped leading dimension back.
            if isinstance(t, torch.Tensor):
                return t.expand(leading_dim_size, *t.shape)
            return t

        expanded_outs = pytree.tree_map(expand_tensor, example_outs)

    node_args = (body_graph, list(xs), list(pos_args))
    proxy_args = pytree.tree_map(proxy_mode.tracer.unwrap_proxy, node_args)
    out_proxy = proxy_mode.tracer.create_proxy(
        "call_function", func_overload, proxy_args, {}, name="map_impl"
    )
    return track_tensor_tree(
        expanded_outs, out_proxy, constant=None, tracer=proxy_mode.tracer
    )
|
| 258 |
+
|
| 259 |
+
|
| 260 |
+
def _unstack_pytree(xs):
|
| 261 |
+
flat_xs, inspec = pytree.tree_flatten(xs)
|
| 262 |
+
if not all(isinstance(xs, torch.Tensor) for xs in flat_xs):
|
| 263 |
+
raise RuntimeError(f"Leaves of xs must be Tensor {flat_xs}")
|
| 264 |
+
|
| 265 |
+
if not all(xs.shape[0] == flat_xs[0].shape[0] for xs in flat_xs):
|
| 266 |
+
raise RuntimeError(
|
| 267 |
+
f"Leaves of xs must have same leading dimension size {[xs.shape for xs in flat_xs]}"
|
| 268 |
+
)
|
| 269 |
+
|
| 270 |
+
a = zip(*flat_xs)
|
| 271 |
+
|
| 272 |
+
pytrees = []
|
| 273 |
+
for tuple in a:
|
| 274 |
+
pytrees.append(pytree.tree_unflatten(tuple, inspec))
|
| 275 |
+
return pytrees
|
| 276 |
+
|
| 277 |
+
|
| 278 |
+
def _stack_pytree(pytrees):
|
| 279 |
+
flat_out = []
|
| 280 |
+
out_spec = None
|
| 281 |
+
for pt in pytrees:
|
| 282 |
+
flat_pt, out_spec = pytree.tree_flatten(pt)
|
| 283 |
+
flat_out.append(flat_pt)
|
| 284 |
+
assert out_spec is not None
|
| 285 |
+
b = zip(*flat_out)
|
| 286 |
+
stacked_out = []
|
| 287 |
+
for leaves in b:
|
| 288 |
+
if all(isinstance(leaf, torch.Tensor) for leaf in leaves):
|
| 289 |
+
stacked_out.append(torch.stack(leaves))
|
| 290 |
+
elif all(leaf is None for leaf in leaves):
|
| 291 |
+
# Backward graph can return None output when forward inputs doesn't require grad.
|
| 292 |
+
# When we eagerly execute backward graph, we need to call _stack_pytree on its output,
|
| 293 |
+
# therefore we need to deal with None output.
|
| 294 |
+
stacked_out.append(None) # type: ignore[arg-type]
|
| 295 |
+
else:
|
| 296 |
+
raise RuntimeError(f"Cannot stack {leaves}.")
|
| 297 |
+
return pytree.tree_unflatten(stacked_out, out_spec)
|
| 298 |
+
|
| 299 |
+
|
| 300 |
+
@map_impl.py_impl(DispatchKey.CompositeExplicitAutograd)
def map_dense(f, xs, pos_args):
    # Eager implementation: run ``f`` once per slice along the leading
    # dimension and restack the per-slice results.
    results = [f(*slice_args, *pos_args) for slice_args in _unstack_pytree(xs)]
    return _stack_pytree(results)
|
| 306 |
+
|
| 307 |
+
|
| 308 |
+
@map_impl.py_impl(DispatchKey.Autograd)
def map_autograd(f, xs, pos_args):
    # Trace the forward and joint graphs once, then execute them through an
    # autograd.Function so gradients flow through the map.
    n_mapped = len(xs)
    fw_graph, bw_graph = create_fw_bw_graph(f, n_mapped, *xs, *pos_args)
    return MapAutogradOp.apply(fw_graph, bw_graph, n_mapped, *xs, *pos_args)
|
| 314 |
+
|
| 315 |
+
|
| 316 |
+
@map_impl.py_impl(ProxyTorchDispatchMode)
def map_proxy_torch_dispatch_mode(mode, f, xs, args):
    # Record a map_impl node while tracing; otherwise plain redispatch.
    if not mode.enable_tracing:
        return map_impl(f, xs, args)
    return trace_map(mode, map_impl, f, xs, args)
|
| 322 |
+
|
| 323 |
+
|
| 324 |
+
@map_impl.py_impl(FakeTensorMode)
def map_fake_tensor_mode(mode, f, xs, args):
    # Under FakeTensorMode the dense implementation is reused inside the
    # mode: shapes and dtypes propagate without real computation.
    with mode:
        return map_dense(f, xs, args)
|
| 328 |
+
|
| 329 |
+
|
| 330 |
+
@map_impl.py_functionalize_impl
def map_functionalize(ctx, f, xs, pos_args):
    # Functionalization rule for map: unwrap functional tensors, check that
    # the body neither mutates nor aliases its inputs, then redispatch below
    # the Functionalize key and rewrap the result.
    unwrapped_xs = ctx.unwrap_tensors(xs)
    unwrapped_args = ctx.unwrap_tensors(pos_args)
    wrapped_fn = ctx.functionalize(f)

    with ctx.redispatch_to_next():
        with disable_proxy_modes_tracing():
            # One unstacked slice of the mapped inputs serves as the example
            # input for the mutation/aliasing checks below.
            example_inputs = (*_unstack_pytree(unwrapped_xs)[0], *unwrapped_args)
        pre_dispatch = hasattr(ctx, "mode") and ctx.mode.pre_dispatch
        if _has_potential_branch_input_mutation(
            f, example_inputs, pre_dispatch=pre_dispatch
        ):
            raise UnsupportedAliasMutationException("torch.map is mutating the input!")

        if _has_potential_branch_input_alias(
            f, example_inputs, pre_dispatch=pre_dispatch
        ):
            raise UnsupportedAliasMutationException("torch.map is aliasing the input!")

        map_return = map_impl(wrapped_fn, unwrapped_xs, unwrapped_args)
        return ctx.wrap_tensors(map_return)
|
valley/lib/python3.10/site-packages/torch/_higher_order_ops/out_dtype.py
ADDED
|
@@ -0,0 +1,171 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
|
| 3 |
+
import torch
|
| 4 |
+
import torch.utils._pytree as pytree
|
| 5 |
+
from torch.fx.experimental.proxy_tensor import (
|
| 6 |
+
disable_proxy_modes_tracing,
|
| 7 |
+
ProxyTorchDispatchMode,
|
| 8 |
+
track_tensor_tree,
|
| 9 |
+
maybe_handle_decomp,
|
| 10 |
+
)
|
| 11 |
+
from torch._C import DispatchKey
|
| 12 |
+
from torch._ops import HigherOrderOperator
|
| 13 |
+
from torch._subclasses.fake_tensor import FakeTensorMode
|
| 14 |
+
from torch._prims_common import elementwise_dtypes, ELEMENTWISE_TYPE_PROMOTION_KIND
|
| 15 |
+
from torch._higher_order_ops.utils import autograd_not_implemented
|
| 16 |
+
|
| 17 |
+
# TODO to figure out a more generic approach
|
| 18 |
+
ALLOWABLE_OPS = [
|
| 19 |
+
torch.ops.aten.linear.default,
|
| 20 |
+
torch.ops.aten.mm.default,
|
| 21 |
+
torch.ops.aten.conv2d.default,
|
| 22 |
+
torch.ops.aten.convolution.default,
|
| 23 |
+
torch.ops.aten.mul.Tensor,
|
| 24 |
+
torch.ops.aten.mul.Scalar,
|
| 25 |
+
torch.ops.aten.div.Tensor,
|
| 26 |
+
torch.ops.aten.div.Scalar,
|
| 27 |
+
]
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
class OutDtypeOperator(HigherOrderOperator):
    """
    The out_dtype operator takes an existing ATen functional operator, an
    `out_dtype` argument, and arguments to the original operator, and executes
    the original operator and returns a Tensor with the `out_dtype` precision.
    This operator does not mandate a compute precision so it allows the
    representation to not be opinionated about the exact implementation.

    The general implementation for all operators will be the following:
        1. Promote inputs dtypes based on default PyTorch dtype promotion rules,
            using the dtypes of all input Tensors/Scalars and the `out_dtype`
            argument.
        2. Execute the operator
        3. Cast the output to `out_dtype`
    """

    def __init__(self):
        super().__init__("out_dtype")
        # TODO(ydwu4): Subclassing HigherOrderOperator causes __module__ to
        # become different (torch._higher_order_ops.out_dtype) which will result
        # in torch.fx to record the op incorrectly in the graph.
        self.__module__ = "torch.ops.higher_order"

    def __call__(self, op, output_dtype, *args):
        # Validate the target op before dispatching: it must be a functional
        # OpOverload that returns exactly one Tensor, and be on the allowlist.
        if not isinstance(op, torch._ops.OpOverload):
            raise ValueError("out_dtype's first argument must be an OpOverload")
        if op._schema.is_mutable:
            raise ValueError("out_dtype's first argument needs to be a functional operator")
        if not (
            len(op._schema.returns) == 1 and
            isinstance(op._schema.returns[0].type, torch.TensorType)
        ):
            # Fixed: the two message fragments previously concatenated with no
            # separator, rendering as "...single tensorInstead got...".
            raise ValueError(
                "out_dtype's can only apply to ops that return a single tensor. "
                f"Instead got {[r.type for r in op._schema.returns]}"
            )

        if op not in ALLOWABLE_OPS:
            raise ValueError(
                f"out_dtype only allows the following operators: {ALLOWABLE_OPS}."
            )

        res = super().__call__(op, output_dtype, *args)

        return res
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
out_dtype = OutDtypeOperator()
|
| 79 |
+
|
| 80 |
+
def trace_out_dtype(proxy_mode, func_overload, op, output_dtype, *args):
    """Record an ``out_dtype`` call_function node in the proxy tracer's graph."""
    # NB: Long-term we should put the decomposition logic into
    # ProxyTorchDispatchMode so that people do not need to call maybe_handle_decomp
    # in all HigherOrderOp proxy implementations.
    r = maybe_handle_decomp(proxy_mode, func_overload, (op, output_dtype, *args), {})
    if r is not NotImplemented:
        return r

    with disable_proxy_modes_tracing():
        # This is a simplified implementation of this operator just for tracing.
        # Actual implementation may also first promote the arguments
        out = op(*args).to(dtype=output_dtype)

    # Associate the example output with the newly-created proxy node.
    node_args = (op, output_dtype, *args)
    proxy_args = pytree.tree_map(proxy_mode.tracer.unwrap_proxy, node_args)
    out_proxy = proxy_mode.tracer.create_proxy(
        "call_function", func_overload, proxy_args, {}, name="out_dtype"
    )
    return track_tensor_tree(out, out_proxy, constant=None, tracer=proxy_mode.tracer)
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
@out_dtype.py_impl(DispatchKey.CompositeExplicitAutograd)
def out_dtype_dense(
    op: torch._ops.OpOverload,
    output_dtype: torch.dtype,
    *args
):
    # Eager implementation: use the fused torch._int_mm kernel for the
    # int8 x int8 -> int32 CUDA matmul special case; otherwise do the generic
    # promote-compute-cast fallback.
    if is_int_mm(op, output_dtype, args):
        return torch._int_mm(*args)
    return out_dtype_fallback(op, output_dtype, *args)
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
def is_int_mm(op, output_dtype, args):
    """Return True iff the call matches the pattern served by ``torch._int_mm``:
    ``aten.mm`` on two int8 CUDA tensors producing int32."""
    if op != torch.ops.aten.mm.default or output_dtype != torch.int32:
        return False
    if len(args) != 2:
        return False
    lhs, rhs = args
    return (
        lhs.dtype == torch.int8
        and rhs.dtype == torch.int8
        and lhs.is_cuda
        and rhs.is_cuda
    )
|
| 122 |
+
|
| 123 |
+
|
| 124 |
+
def out_dtype_fallback(op, output_dtype, *args):
|
| 125 |
+
flat_inputs = pytree.arg_tree_leaves(*args) + [torch.ones(1, dtype=output_dtype)]
|
| 126 |
+
promote_dtype: torch.dtype = elementwise_dtypes(
|
| 127 |
+
*flat_inputs,
|
| 128 |
+
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
|
| 129 |
+
)[0]
|
| 130 |
+
|
| 131 |
+
casted_args = pytree.tree_map_only(
|
| 132 |
+
torch.Tensor, lambda arg: arg.to(dtype=promote_dtype), args
|
| 133 |
+
)
|
| 134 |
+
res = op(*casted_args).to(dtype=output_dtype)
|
| 135 |
+
return res
|
| 136 |
+
|
| 137 |
+
|
| 138 |
+
out_dtype.py_impl(DispatchKey.Autograd)(autograd_not_implemented(out_dtype, deferred_error=True))
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
@out_dtype.py_impl(ProxyTorchDispatchMode)
def out_dtype_proxy(
    mode: ProxyTorchDispatchMode,
    op: torch._ops.OpOverload,
    output_dtype: torch.dtype,
    *args
):
    # Record a proxy node while tracing; plain redispatch otherwise.
    if not mode.enable_tracing:
        return out_dtype(op, output_dtype, *args)
    return trace_out_dtype(mode, out_dtype, op, output_dtype, *args)
|
| 152 |
+
|
| 153 |
+
|
| 154 |
+
@out_dtype.py_impl(FakeTensorMode)
def out_dtype_fake_tensor_mode(
    mode: FakeTensorMode,
    op: torch._ops.OpOverload,
    output_dtype: torch.dtype,
    *args
):
    # Fake tensors: reuse the dense path inside the mode so output metadata
    # (shape/dtype) propagates without real computation.
    with mode:
        return out_dtype_dense(op, output_dtype, *args)
|
| 163 |
+
|
| 164 |
+
|
| 165 |
+
@out_dtype.py_functionalize_impl
def out_dtype_func(ctx, op, output_dtype, *args):
    # Functionalization: unwrap functional tensors, redispatch below the
    # Functionalize key, and rewrap the result.
    unwrapped_args = tuple(map(ctx.unwrap_tensors, args))

    with ctx.redispatch_to_next():
        result = out_dtype(op, output_dtype, *unwrapped_args)
        return ctx.wrap_tensors(result)
|
valley/lib/python3.10/site-packages/torch/_higher_order_ops/strict_mode.py
ADDED
|
@@ -0,0 +1,92 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import torch
|
| 3 |
+
import torch._subclasses.functional_tensor
|
| 4 |
+
|
| 5 |
+
import torch.utils._pytree as pytree
|
| 6 |
+
|
| 7 |
+
from torch._C import DispatchKey
|
| 8 |
+
from torch._functorch.utils import exposed_in
|
| 9 |
+
|
| 10 |
+
from torch._higher_order_ops.utils import _set_compilation_env, autograd_not_implemented
|
| 11 |
+
from torch._ops import HigherOrderOperator
|
| 12 |
+
from torch._subclasses.fake_tensor import FakeTensorMode
|
| 13 |
+
from torch.fx.experimental.proxy_tensor import (
|
| 14 |
+
disable_proxy_modes_tracing,
|
| 15 |
+
make_fx,
|
| 16 |
+
ProxyTorchDispatchMode,
|
| 17 |
+
track_tensor_tree,
|
| 18 |
+
)
|
| 19 |
+
from torch.utils._python_dispatch import _get_current_dispatch_mode
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
@exposed_in("torch")
|
| 23 |
+
def strict_mode(callable, operands):
|
| 24 |
+
if torch.compiler.is_dynamo_compiling():
|
| 25 |
+
return strict_mode_op(callable, operands)
|
| 26 |
+
|
| 27 |
+
with _set_compilation_env():
|
| 28 |
+
with torch._dynamo.utils.disable_cache_limit():
|
| 29 |
+
return torch.compile(strict_mode_op, backend="eager", fullgraph=True)(
|
| 30 |
+
callable, operands
|
| 31 |
+
)
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
strict_mode_op = HigherOrderOperator("strict_mode")
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
@strict_mode_op.py_impl(DispatchKey.CompositeExplicitAutograd)
def strict_mode_op_dense(callable, operands):
    # At the dense (CPU/CUDA) level no Python dispatch mode should be active.
    current_mode = _get_current_dispatch_mode()
    assert current_mode is None, "Mode should never be enabled for CPU/CUDA key"
    return callable(*operands)
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
strict_mode_op.py_impl(DispatchKey.Autograd)(
|
| 45 |
+
autograd_not_implemented(strict_mode_op, deferred_error=True)
|
| 46 |
+
)
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
@strict_mode_op.py_impl(ProxyTorchDispatchMode)
def inner(mode, callable, operands):
    # Record a strict_mode node while tracing; plain redispatch otherwise.
    if not mode.enable_tracing:
        return strict_mode_op(callable, operands)
    return trace_strict_mode(mode, strict_mode_op, callable, operands)
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
def trace_strict_mode(mode, strict_mode_op, callable, operands):
    """Record a ``strict_mode`` call_function node in the tracer's graph.

    Traces ``callable`` into a subgraph, registers it on the tracer root,
    and associates example outputs with the new proxy node.
    """
    pre_dispatch = getattr(mode, "pre_dispatch", False)

    with disable_proxy_modes_tracing():
        graph = make_fx(callable, pre_dispatch=pre_dispatch)(*operands)

    graph_name = mode.tracer.get_fresh_qualname("strict_graph_")
    mode.tracer.root.register_module(graph_name, graph)

    args = (graph, operands)

    proxy_args = pytree.tree_map(mode.tracer.unwrap_proxy, args)

    out_proxy = mode.tracer.create_proxy(
        "call_function", strict_mode_op, proxy_args, {}, name="strict_mode"
    )

    # Run the traced subgraph once to obtain example outputs for tracking.
    out = graph(*operands)
    return track_tensor_tree(out, out_proxy, constant=None, tracer=mode.tracer)
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
@strict_mode_op.py_impl(FakeTensorMode)
def strict_mode_fake_tensor_mode(mode, callable, operands):
    # Under FakeTensorMode, simply run the callable inside the mode.
    with mode:
        return callable(*operands)
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
@strict_mode_op.py_functionalize_impl
def strict_mode_func(ctx, callable, inputs):
    # Functionalization: unwrap inputs, functionalize the callable,
    # redispatch below the Functionalize key, and rewrap the result.
    unwrapped_inputs = ctx.unwrap_tensors(inputs)
    with ctx.redispatch_to_next():
        functionalized = ctx.functionalize(callable)
        result = strict_mode_op(functionalized, unwrapped_inputs)
        return ctx.wrap_tensors(result)
|
valley/lib/python3.10/site-packages/torch/_higher_order_ops/torchbind.py
ADDED
|
@@ -0,0 +1,119 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import logging
|
| 3 |
+
from contextlib import contextmanager
|
| 4 |
+
|
| 5 |
+
import torch
|
| 6 |
+
from torch._C import DispatchKey # @manual
|
| 7 |
+
from torch._functorch._aot_autograd.utils import KNOWN_TYPES
|
| 8 |
+
from torch._higher_order_ops.utils import autograd_not_implemented
|
| 9 |
+
from torch._library.fake_class_registry import _ns_and_class_name, FakeScriptObject
|
| 10 |
+
from torch._ops import HigherOrderOperator
|
| 11 |
+
from torch._subclasses.fake_tensor import FakeTensorMode
|
| 12 |
+
from torch.fx.experimental.proxy_tensor import ProxyTorchDispatchMode, track_tensor_tree
|
| 13 |
+
from torch.fx.node import has_side_effect
|
| 14 |
+
from torch.utils import _pytree as pytree
|
| 15 |
+
|
| 16 |
+
log = logging.getLogger(__name__)
|
| 17 |
+
|
| 18 |
+
# The call_torchbind operator represents a method invocation on a torchbind
|
| 19 |
+
# object. The calling convention is:
|
| 20 |
+
# call_torchbind(self: ScriptObject, method_name: str, *method_args, **method_kwargs)
|
| 21 |
+
# We do not expect users to write this operator directly. Instead it will be
|
| 22 |
+
# emitted by Dynamo when tracing encounters a torchbind object.
|
| 23 |
+
call_torchbind = HigherOrderOperator("call_torchbind")
|
| 24 |
+
|
| 25 |
+
# Register this operator as side-effectful with FX.
|
| 26 |
+
# TODO: this is not really sufficient. While passes (hopefully) check
|
| 27 |
+
# Node.is_impure() and make good decisions, we also assume we can execute the
|
| 28 |
+
# graph as many times as we want without changing behavior, which is NOT true of
|
| 29 |
+
# ops that mutate torchbind object state.
|
| 30 |
+
has_side_effect(call_torchbind)
|
| 31 |
+
|
| 32 |
+
_orig_scriptmethod_call = torch.ScriptMethod.__call__
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def torchbind_method_redispatch(self, *args, **kwargs):
    # Route method calls on real ScriptObjects through the call_torchbind HOP;
    # anything else takes the original ScriptMethod.__call__ path.
    if not isinstance(self.raw_owner, torch.ScriptObject):
        return _orig_scriptmethod_call(self, *args, **kwargs)
    return call_torchbind(self.raw_owner, self.name, *args, **kwargs)
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
@contextmanager
def enable_torchbind_tracing():
    """Context manager that acts as a feature flag to enable torchbind tracing
    behavior. Once torchbind tracing has been stabilized, we can remove this and
    turn it always on.
    """
    try:
        # Let AOTAutograd treat ScriptObject as a known input type and reroute
        # ScriptMethod calls through call_torchbind while the flag is active.
        KNOWN_TYPES.append(torch.ScriptObject)
        torch.ScriptMethod.__call__ = torchbind_method_redispatch  # type: ignore[method-assign]
        yield
    finally:
        # Restore global state; pop() doubles as a sanity check that nobody
        # else appended to KNOWN_TYPES while tracing was enabled.
        assert (
            KNOWN_TYPES.pop() is torch.ScriptObject
        ), "Someone else messed with KNOWN_TYPES during tracing, exploding."
        torch.ScriptMethod.__call__ = _orig_scriptmethod_call  # type: ignore[method-assign]
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
@call_torchbind.py_impl(DispatchKey.CompositeExplicitAutograd)
def call_torchbind_impl(obj, method, *args, **kwargs):
    # Dispatch a method call on either a real ScriptObject or its fake
    # stand-in registered via torch._library.register_fake_class.
    if isinstance(obj, torch.ScriptObject):
        bound_method = getattr(obj, method)
        return _orig_scriptmethod_call(bound_method, *args, **kwargs)
    if isinstance(obj, FakeScriptObject):
        return getattr(obj.wrapped_obj, method)(*args, **kwargs)
    raise RuntimeError(f"Unsupported first arg type {type(obj)} for call_torchbind")
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
@call_torchbind.py_impl(ProxyTorchDispatchMode)
def inner(mode, *args, **kwargs):
    # Proxy-tracing rule: record a call_torchbind node and execute the call
    # for real to obtain example outputs.  Warns when tracing against a real
    # ScriptObject, since the method call may mutate the original object.
    if mode.enable_tracing:
        proxy_args = pytree.tree_map(mode.tracer.unwrap_proxy, args)
        proxy_kwargs = pytree.tree_map(mode.tracer.unwrap_proxy, kwargs)

        out_proxy = mode.tracer.create_proxy(
            "call_function",
            call_torchbind,
            proxy_args,
            proxy_kwargs,
        )
        out = call_torchbind(*args, **kwargs)

        # Calling convention: (object, method name, *method args).
        obj, method, *rest_args = args
        if isinstance(obj, torch.ScriptObject):
            ns, class_name = _ns_and_class_name(
                obj._type().qualified_name()  # type: ignore[attr-defined]
            )
            log.warning(
                "Tracing torchbind method %s.%s with real ScriptObject. This may"
                " cause the original object being mutated. If this is not intended,"
                ' You can register a fake class with torch._library.register_fake_class("%s::%s").',
                class_name,
                method,
                ns,
                class_name,
            )

        return track_tensor_tree(out, out_proxy, constant=None, tracer=mode.tracer)
    else:
        return call_torchbind(*args, **kwargs)
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
# TODO: currently we just run the C++ implementation with fake tensors.
# But we should make it possible to register a fake torchbind implementation.
@call_torchbind.py_impl(FakeTensorMode)
def call_torchbind_fake(mode, *args, **kwargs):
    # Fake tensors: run the eager implementation inside the mode.
    with mode:
        return call_torchbind_impl(*args, **kwargs)
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
# Autograd is not implemented for torchbind calls. deferred_error=True defers
# the "not implemented" error instead of raising at registration time.
call_torchbind.py_impl(DispatchKey.Autograd)(
    autograd_not_implemented(call_torchbind, deferred_error=True)
)
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
@call_torchbind.py_functionalize_impl
def call_torchbind_func(ctx, *args, **kwargs):
    """Functionalization impl: unwrap functional tensor args, redispatch,
    then re-wrap the outputs as functional tensors."""
    args = ctx.unwrap_tensors(args)
    with ctx.redispatch_to_next():
        return ctx.wrap_tensors(call_torchbind(*args, **kwargs))
|
valley/lib/python3.10/site-packages/torch/_higher_order_ops/triton_kernel_wrap.py
ADDED
|
@@ -0,0 +1,737 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import dataclasses
|
| 3 |
+
import inspect
|
| 4 |
+
import logging
|
| 5 |
+
import threading
|
| 6 |
+
from collections import defaultdict
|
| 7 |
+
from typing import Any, Dict, List, Optional, Union
|
| 8 |
+
|
| 9 |
+
import torch.utils._pytree as pytree
|
| 10 |
+
from torch import Tensor
|
| 11 |
+
from torch._C import DispatchKey
|
| 12 |
+
from torch._ops import HigherOrderOperator
|
| 13 |
+
from torch._prims_common import clone_preserve_strides
|
| 14 |
+
from torch._subclasses.fake_tensor import FakeTensorMode
|
| 15 |
+
from torch.fx.experimental.proxy_tensor import (
|
| 16 |
+
disable_proxy_modes_tracing,
|
| 17 |
+
ProxyTorchDispatchMode,
|
| 18 |
+
track_tensor_tree,
|
| 19 |
+
)
|
| 20 |
+
|
| 21 |
+
log = logging.getLogger("torch._dynamo")
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
###############################################################################
|
| 25 |
+
# Kernel Side Table
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
# We cannot put Triton Kernels into the FX graph as the graph nodes
|
| 29 |
+
# do not support arbitrary functions.
|
| 30 |
+
# Use a side table.
|
| 31 |
+
# We use two dicts so that fetching both the kernel and id are O(1)
|
| 32 |
+
class KernelSideTable:
    """Process-wide registry assigning stable integer ids to Triton kernels.

    Triton kernels (and some of their constant arguments) cannot be stored
    directly as FX graph nodes, so the graph records an integer index and
    this table maps the index back to the actual object. Two dicts are kept
    so that both id -> kernel and kernel -> id lookups are O(1).
    """

    id_to_kernel: Dict[int, Any] = {}
    kernel_to_id: Dict[Any, int] = {}
    constant_args: Dict[int, Any] = {}
    lock = threading.Lock()

    def add_kernel(self, kernel) -> int:
        """Register ``kernel`` (idempotently) and return its table index."""
        with self.lock:
            prior = self.kernel_to_id.get(kernel)
            if prior is not None:
                return prior
            new_idx = len(self.id_to_kernel)
            self.id_to_kernel[new_idx] = kernel
            self.kernel_to_id[kernel] = new_idx
            return new_idx

    def get_kernel(self, idx: int):
        """Return the Triton kernel registered at ``idx``."""
        # No lock: a single dict read is atomic under the GIL.
        assert idx in self.id_to_kernel
        return self.id_to_kernel[idx]

    def add_constant_args(self, args) -> int:
        """Store constant args that cannot live in the graph; return an index."""
        with self.lock:
            new_idx = len(self.constant_args)
            self.constant_args[new_idx] = args
            return new_idx

    def get_constant_args(self, idx: int):
        """Return the constant args stored at ``idx``."""
        # No lock: a single dict read is atomic under the GIL.
        assert idx in self.constant_args
        return self.constant_args[idx]

    def reset_table(self) -> None:
        """Clear the table (unit-test helper; assumes single-threaded use)."""
        self.id_to_kernel = {}
        self.kernel_to_id = {}
        self.constant_args = {}
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
kernel_side_table = KernelSideTable()
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
###############################################################################
|
| 81 |
+
# Mutation Tracker
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
@dataclasses.dataclass(frozen=True)
class Param:
    """A reference to a kernel function parameter, by positional index."""

    # Positional index of the parameter in the enclosing TTIR function.
    idx: int
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
@dataclasses.dataclass(frozen=True)
class Intermediate:
    """An intermediate (SSA-like) value produced by an op in the TTIR."""

    # Non-negative: reindexed id of a real TTIR value.
    # Negative: a synthesized placeholder with no TTIR counterpart.
    idx: int

    def fake(self) -> bool:
        # Negative indices mark synthesized ("fake") intermediates created
        # by the analysis itself (e.g. for ops with no results).
        return self.idx < 0
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
@dataclasses.dataclass(frozen=True)
class Op:
    """A single TTIR operation captured by the analysis."""

    # MLIR op name, e.g. "tt.store" or "tt.call".
    name: str
    # Callee symbol name; set if and only if this op is a "tt.call".
    fn_call_name: Optional[str]
    # Operands: either function params or intermediates.
    args: List[Union[Param, Intermediate]]
    # The value this op produces (excluded from repr to keep logs readable).
    ret: Intermediate = dataclasses.field(repr=False)

    def __post_init__(self):
        # Enforce the invariant on fn_call_name documented above.
        if self.name == "tt.call":
            assert self.fn_call_name is not None
        else:
            assert self.fn_call_name is None
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
def generate_ttir(kernel, kwargs):
    """
    Uses Triton's internal code generation to create TTIR.

    Returns a pair ``(ttir_module, ordered_tensor_names)`` where
    ``ordered_tensor_names`` lists the kernel arg names (in declaration
    order) whose substituted values are tensors.

    Raises ValueError if ``kwargs`` does not cover the kernel's arg names,
    and RuntimeError if the generated TTIR module fails verification.
    """
    import sympy
    import triton
    from triton.compiler.compiler import ASTSource
    from triton.runtime.autotuner import Autotuner
    from triton.runtime.jit import JITFunction

    import torch
    import torch._inductor.ir
    from torch._subclasses.fake_tensor import FakeTensor

    if isinstance(kernel, Autotuner):
        if len(kernel.configs) > 0:
            # If we are autotuning, then it doesn't matter which version gets
            # picked for tracing purposes, so lets pick the first one
            kwargs = {**kwargs, **kernel.configs[0].kwargs}
        kernel = kernel.fn

    assert isinstance(kernel, JITFunction)

    if len(kwargs) != len(kernel.arg_names):
        raise ValueError("Incorrect number of arguments passed to kernel")

    # Replace all SymExprs with a regular value for TTIR generation
    # Replace all FakeTensor/TensorBox with real tensors
    # These replacements are needed for triton's type, key and config functions
    ordered_args: Dict[str, Any] = {}
    for name in kernel.arg_names:
        a = kwargs[name]
        if isinstance(a, (torch.SymInt, torch.SymFloat, torch.SymBool, sympy.Expr)):
            # Any concrete value works here; 2 is an arbitrary stand-in used
            # only so Triton's type/key functions can run.
            ordered_args[name] = 2
        elif isinstance(a, (FakeTensor, torch._inductor.ir.TensorBox)):
            # Materialize a tiny real tensor of the same dtype; dispatch is
            # disabled so the fake-tensor mode does not intercept it.
            with torch._C._DisableTorchDispatch():
                ordered_args[name] = torch.empty(2, dtype=a.dtype)
        else:
            ordered_args[name] = a

    ordered_tensor_names = [
        name for name, arg in ordered_args.items() if isinstance(arg, Tensor)
    ]
    specialization = kernel._get_config(*ordered_args.values())
    # Non-tensor args are treated as compile-time constants by Triton.
    constants = {
        i: arg
        for i, arg in enumerate(ordered_args.values())
        if not isinstance(arg, Tensor)
    }

    # Build kernel signature -- doesn't include constexpr arguments.
    signature = {
        i: kernel._type_of(kernel._key_of(arg))
        for i, arg in enumerate(ordered_args.values())
        if i not in kernel.constexprs
    }

    context = triton._C.libtriton.ir.context()
    target = triton.runtime.driver.active.get_current_target()
    backend = triton.compiler.compiler.make_backend(target)
    options = backend.parse_options(dict())
    triton._C.libtriton.ir.load_dialects(context)
    backend.load_dialects(context)

    src = ASTSource(kernel, signature, constants, specialization)

    # Triton changes ASTSource.make_ir to take 3 arguments. Handle
    # backward compatibility here.
    if len(inspect.signature(src.make_ir).parameters) == 2:
        ttir_module = src.make_ir(options, context)
    else:
        codegen_fns = backend.get_codegen_implementation()
        ttir_module = src.make_ir(options, codegen_fns, context)
    if not ttir_module.verify():
        raise RuntimeError("Verification for TTIR module has failed")

    return ttir_module, ordered_tensor_names
|
| 188 |
+
|
| 189 |
+
|
| 190 |
+
def ttir_to_functions(ttir_module) -> Dict[str, Dict[Intermediate, List[Op]]]:
    """
    Walk the `ttir_module` bottom up to mine the `functions` from
    the structured MLIR entities representing the Triton kernel
    (mlir::Operation, mlir::Block, mlir::Region).

    Returns a mapping: function name -> (result Intermediate -> list of Ops
    producing it). Control-flow regions (scf.*, tt.reduce, tt.scan) are
    inlined into their enclosing function's flat op map.
    """
    functions: Dict[str, Dict[Intermediate, List[Op]]] = {}

    # block id --> op result (Intermediate) --> one or more ops
    op_stack: Dict[int, Dict[Intermediate, List[Op]]] = defaultdict(
        lambda: defaultdict(list)
    )
    region_id_to_block_ids: Dict[int, List[int]] = defaultdict(list)
    block_id_to_block_arg_ids: Dict[int, List[int]] = {}
    # id -> what it should be rewritten to when inlining blocks.
    replacements: Dict[int, Union[Intermediate, Param]] = {}
    # MLIR value ids are arbitrary; remap them to small dense ints.
    reindex_map: Dict[int, int] = {}
    # Counter for synthesized (negative-idx) Intermediates.
    next_fake_intermediate = 0

    def reindex(idx):
        # Densely renumber raw MLIR value ids on first sight.
        if idx not in reindex_map:
            reindex_map[idx] = len(reindex_map)
        return reindex_map[idx]

    def mlir_to_functions(op) -> None:
        # Callback invoked by ttir_module.walk for every op, bottom-up.
        name: str = op.get_name()
        if name == "builtin.module":
            # this wraps all tt.func ops
            return

        operand_ids: List[int] = [
            reindex(op.get_operand(i).id()) for i in range(op.get_num_operands())
        ]
        result_ids: List[int] = [
            reindex(op.get_result(i).id()) for i in range(op.get_num_results())
        ]

        child_block_ids: List[int] = []
        for i in [op.get_region(i).id() for i in range(op.get_num_regions())]:
            # as the walk is bottom-up, the region_id_to_block_ids[i]
            # must be populated by the time we process the enclosing op
            child_block_ids.extend(region_id_to_block_ids[i])

        parent_block_id = -1
        parent_block = op.get_block()
        if parent_block is not None:
            parent_block_id = parent_block.id()
            if parent_block_id not in block_id_to_block_arg_ids:
                block_id_to_block_arg_ids[parent_block_id] = []
                for i in range(parent_block.get_num_arguments()):
                    block_id_to_block_arg_ids[parent_block_id].append(
                        reindex(parent_block.get_argument(i).id()),
                    )
                # the region info is collected via ops' parent blocks to be
                # used later when the region's encloding op is traversed
                parent_region = parent_block.get_parent()
                if parent_region is not None:
                    region_id_to_block_ids[parent_region.id()].append(parent_block_id)

        nonlocal next_fake_intermediate

        if name == "tt.func":
            # for function ops: gather and inline
            # the ops from all child blocks
            fn_ops = defaultdict(list)
            for child_block_id in child_block_ids:
                for result, block_fn_ops in op_stack.pop(child_block_id).items():
                    for block_fn_op in block_fn_ops:
                        fn_ops[result].append(block_fn_op)

            # replace the corresponding Intermediates in the
            # child op args with the function args (Params)
            for i, idx in enumerate(block_id_to_block_arg_ids[child_block_ids[0]]):
                replacements[idx] = Param(i)

            for fn_op_list in fn_ops.values():
                for fn_op in fn_op_list:
                    for i in range(len(fn_op.args)):
                        arg = fn_op.args[i]
                        seen = set()  # to break cycles
                        # there can be transitive replacements, but likely
                        # no cycles (we keep the `seen` set just in case)
                        while (
                            isinstance(arg, Intermediate)
                            and arg.idx in replacements
                            and arg.idx not in seen
                        ):
                            seen.add(arg.idx)
                            arg = fn_op.args[i] = replacements[arg.idx]

            # next function capture starts
            # with empty replacements
            replacements.clear()

            fn_name = op.get_str_attr("sym_name")
            functions[fn_name] = fn_ops
        elif child_block_ids:
            if name in {"scf.if", "scf.for", "scf.while", "tt.reduce", "tt.scan"}:
                # for blocked ops: inline the enclosed ops into
                # the parent block + rewire the last op in each
                # child block to return the block result
                return_ops = []
                for block_id in child_block_ids:
                    if name == "scf.for":
                        # example:
                        # %result = scf.for %iv = %lb to %ub step %step iter_args(%arg = %init) -> (i32) ...
                        # block args: 2 (%iv, %arg)
                        # op operands: 4 (%lb, %ub, %step, %init)
                        # `%arg` is mapping to `%init`
                        for i, idx in enumerate(block_id_to_block_arg_ids[block_id]):
                            if i == 0:
                                # %iv (the induction variable) has no operand
                                # counterpart: give it a fake Intermediate.
                                next_fake_intermediate -= 1
                                replacements[idx] = Intermediate(next_fake_intermediate)
                            else:
                                replacements[idx] = Intermediate(operand_ids[i + 2])
                    elif name == "scf.while":
                        # example:
                        # %3:3 = scf.while (%arg2 = %1, %arg3 = %2, %arg4 = %c0_i32_8) ...
                        # block args: 3 (%arg2, %arg3, %arg4)
                        # op operands: 3 (%1, %2, %c0_i32_8)
                        # `%arg2` is mapping to `%1`, `%arg3` is mapping to `%2`, ...
                        for i, idx in enumerate(block_id_to_block_arg_ids[block_id]):
                            replacements[idx] = Intermediate(operand_ids[i])
                    elif name == "scf.if":
                        # the scf block args are ignored by the pass. but, as they
                        # may be used as operands of the ops inside the block
                        # (and nested blocks inlined in the current block by now),
                        # they are replaced by new fake Intermediates to avoid "this
                        # operand is not returned by any other op in the fn" error
                        # in the downstream analysis
                        for idx in block_id_to_block_arg_ids[block_id]:
                            next_fake_intermediate -= 1
                            replacements[idx] = Intermediate(next_fake_intermediate)
                    else:
                        assert name in ("tt.reduce", "tt.scan")
                        # wire the block arguments to the op arguments
                        num_operands = len(operand_ids)
                        block_arg_ids = block_id_to_block_arg_ids[block_id]
                        assert len(block_arg_ids) == 2 * num_operands, (
                            f"{name} is expected to have twice as "
                            "many block arguments as op arguments: "
                            f"{operand_ids=}, {block_arg_ids=}."
                        )
                        for i, idx in enumerate(block_arg_ids):
                            # for a tt.reduce/tt.scan op with N arguments, the block
                            # arguments comprise N reduced values followed by
                            # N current values corresponding to the N op args
                            replacements[idx] = Intermediate(
                                operand_ids[i % num_operands]
                            )

                    if block_id in op_stack:
                        block_ops = op_stack.pop(block_id)
                        if not block_ops:
                            continue
                        last_ret, last_ops = block_ops.popitem()
                        if all(
                            op.name
                            in ("scf.yield", "tt.reduce.return", "tt.scan.return")
                            for op in last_ops
                        ):
                            # if last_ops are all return ops, treat them separately
                            return_ops.extend(last_ops)
                        else:
                            # otherwise, return last_ops to the block
                            block_ops[last_ret] = last_ops
                        for op_result, child_ops in block_ops.items():
                            op_stack[parent_block_id][op_result].extend(child_ops)

                # each result of the blocked op is produced by the collected
                # return ops of its child blocks
                scf_results = [Intermediate(idx) for idx in result_ids]
                for scf_result in scf_results:
                    for return_op in return_ops:
                        op_stack[parent_block_id][scf_result].append(return_op)
            else:
                raise RuntimeError(
                    f"Unknown blocked function: {name}. Can't capture the TTIR."
                )
        else:
            # plain (non-region) op: record it in the parent block's map
            callee = None
            if name == "tt.call":
                callee = op.get_flat_symbol_ref_attr("callee")
            args: List[Union[Param, Intermediate]] = [
                Intermediate(operand) for operand in operand_ids
            ]
            block_ops = op_stack[parent_block_id]
            if result_ids:
                for result_id in result_ids:
                    res = Intermediate(result_id)
                    block_ops[res].append(Op(name, callee, args, res))
            else:
                # ops with no results (e.g. tt.store) still need a key:
                # synthesize a fake Intermediate for them
                next_fake_intermediate -= 1
                fake_res = Intermediate(next_fake_intermediate)
                block_ops[fake_res].append(Op(name, callee, args, fake_res))

    ttir_module.walk(mlir_to_functions)

    return functions
|
| 386 |
+
|
| 387 |
+
|
| 388 |
+
class MemoizeWithCycleCheck:
    """Memoize ``fn(functions, fn_name, num_args)`` keyed on (fn_name, num_args).

    Before computing, a ``None`` sentinel is parked in the cache; if the same
    key is requested again while the computation is still in flight (i.e. the
    wrapped function recursed into itself), the sentinel is observed and a
    RuntimeError is raised instead of recursing forever.
    """

    def __init__(self, fn):
        self.fn = fn
        self.reset()

    def __call__(self, functions, fn_name, num_args):
        key = (fn_name, num_args)
        if key not in self.cache:
            # Park the sentinel first so a recursive call is detected below.
            self.cache[key] = None
            self.cache[key] = self.fn(functions, fn_name, num_args)
        result = self.cache[key]
        if result is None:
            raise RuntimeError("Recursion is not supported")
        return result

    def reset(self):
        # Drop all memoized results (used between top-level invocations).
        self.cache = {}
|
| 404 |
+
|
| 405 |
+
|
| 406 |
+
@MemoizeWithCycleCheck
def analyze_kernel_mutations(functions, fn_name, num_args):
    """
    Analyzes the graph to detect all sinks from a predefined list of sinks
    by using triton's MemWrite trait list. NOTE: What if triton exposed this?
    From each sink, it traverses the CFG backwards to identify all the input
    pointers that are mutated.

    Returns a list of ``num_args`` booleans: True at position i iff the i-th
    function argument may be written to. Raises RuntimeError on ops the
    analysis cannot model (and, via the memoizer, on recursive tt.call).
    """
    # Name of mutation op to mutated parameter indices
    # List from Triton Github include/triton/Dialect/Triton/IR/TritonOps.td
    # All the OPs that have MemWrite trait.
    # What if Triton exposed this?
    MUTATION_OPS = {"tt.store": [0], "tt.atomic_cas": [0], "tt.atomic_rmw": [0]}
    # Ops that we want to bail out on
    UNKNOWN_OPS = {"tt.elementwise_inline_asm"}

    stack: List[Union[Param, Intermediate]] = []
    visited = set()
    ops = functions[fn_name]
    # Seed the stack with every value that flows into a mutating sink.
    for op_list in ops.values():
        for op in op_list:
            if op.name in UNKNOWN_OPS:
                raise RuntimeError(
                    f"ttir analysis hit an op we do not know how to analyze: {op.name}"
                )

            if op.name == "tt.call":
                assert op.fn_call_name in functions
                # Recurse (memoized) into the callee; its mutated params map
                # back to the corresponding call-site arguments.
                mutations = analyze_kernel_mutations(
                    functions, op.fn_call_name, len(op.args)
                )
                stack.extend(arg for arg, mutated in zip(op.args, mutations) if mutated)
            else:
                for idx in MUTATION_OPS.get(op.name, []):
                    stack.append(op.args[idx])

    # The following is an iterative DFS algorithm
    mutated = [False] * num_args
    while stack:
        arg = stack.pop()
        if arg in visited:
            continue

        visited.add(arg)

        if isinstance(arg, Param):
            if arg.idx >= num_args:
                # This is an argument defined in the kernel, not passed in
                continue
            mutated[arg.idx] = True
        elif isinstance(arg, Intermediate) and not arg.fake():
            # Walk backwards through the ops that produced this value.
            for op in ops[arg]:
                # Skip arguments to load
                if op.name != "tt.load":
                    stack.extend(op.args)
    return mutated
|
| 462 |
+
|
| 463 |
+
|
| 464 |
+
def identify_mutated_tensors(kernel, kwargs):
    """
    Given a triton kernel and the arguments for this kernel, this function
    1) Retrieves the TTIR converted version of the kernel from Triton's API.
    2) Parses the TTIR and creates a control flow graph
    3) Analyzes the graph to detect all input tensor mutations

    Returns the list of kwarg names whose tensors may be mutated. On any
    analysis failure this conservatively falls back to reporting every
    tensor kwarg as mutated (after logging the exception).
    """

    ttir_module = None
    functions = None
    try:
        ttir_module, ordered_tensor_names = generate_ttir(kernel, kwargs)

        # extract functions from TTIR using MLIR bindings exposed by Triton code
        functions = ttir_to_functions(ttir_module)

        assert functions is not None
        kernel_name = next(iter(functions.keys()))
        # Triton codegen modifies the name
        assert kernel.fn.__name__ in kernel_name
        # Reset the cache between top level invocations
        # The cache for analyze kernel mutations is mainly used for cycle
        # detection, so each top level invocation needs a clean cache
        analyze_kernel_mutations.reset()
        mutations = analyze_kernel_mutations(
            functions, kernel_name, len(ordered_tensor_names)
        )

        return [
            ordered_tensor_names[i] for i, mutated in enumerate(mutations) if mutated
        ]
    except Exception:
        # Fix: the previous `except Exception as e:` bound `e` without using
        # it; exc_info=True already attaches the active exception to the log.
        log.warning(
            "Encountered an exception in identify_mutated_tensors, assuming every input is mutated",
            exc_info=True,
        )
        if ttir_module is not None:
            log.debug("TTIR:\n%s", str(ttir_module))
        if functions is not None:
            log.debug("functions:")
            for name, fn in functions.items():
                log.debug("===\t%s\t===", name)
                for ret, ops in fn.items():
                    log.debug("%s\t=>\t%s", ret, ops)
        # Conservative fallback: treat every tensor kwarg as mutated.
        return [key for key, value in kwargs.items() if isinstance(value, Tensor)]
|
| 509 |
+
|
| 510 |
+
|
| 511 |
+
###############################################################################
|
| 512 |
+
# Triton Kernel Wrappers
|
| 513 |
+
|
| 514 |
+
|
| 515 |
+
# Used for wrapping a Triton Kernel
class TritonKernelWrapperMutation(HigherOrderOperator):
    """HOP that runs a user Triton kernel, mutating its tensor args in place."""

    def __init__(self):
        super().__init__("triton_kernel_wrapper_mutation")
|
| 519 |
+
|
| 520 |
+
|
| 521 |
+
triton_kernel_wrapper_mutation = TritonKernelWrapperMutation()
|
| 522 |
+
|
| 523 |
+
|
| 524 |
+
# Used for wrapping a Triton Kernel in a functional manner
class TritonKernelWrapperFunctional(HigherOrderOperator):
    """HOP that runs a user Triton kernel on clones, returning the mutated
    tensors instead of mutating the inputs."""

    def __init__(self):
        super().__init__("triton_kernel_wrapper_functional")
|
| 528 |
+
|
| 529 |
+
|
| 530 |
+
triton_kernel_wrapper_functional = TritonKernelWrapperFunctional()
|
| 531 |
+
|
| 532 |
+
|
| 533 |
+
@triton_kernel_wrapper_mutation.py_impl(DispatchKey.CompositeExplicitAutograd)
def triton_kernel_wrapper_mutation_dense(
    *, kernel_idx, constant_args_idx, grid, kwargs
):
    """Dense (eager) impl: look up the kernel and constant args in the side
    table, resolve the launch grid, and launch the kernel for real."""
    from torch._inductor.codegen.wrapper import user_defined_kernel_grid_fn_code

    kernel = kernel_side_table.get_kernel(kernel_idx)
    constant_args = kernel_side_table.get_constant_args(constant_args_idx)

    if len(grid) == 1:
        # A single grid entry is used directly as the grid callable/value.
        grid_fn = grid[0]
    else:
        # Multiple grids (one per autotuner config): generate and exec a
        # dispatcher function that picks the grid matching the chosen config.
        fn_name, code = user_defined_kernel_grid_fn_code(
            kernel.fn.__name__, kernel.configs, grid
        )
        namespace: Dict[str, Any] = {}
        exec(code, namespace)
        grid_fn = namespace[fn_name]

    # Triton launch syntax: kernel[grid](**args).
    kernel[grid_fn](**kwargs, **constant_args)
|
| 553 |
+
|
| 554 |
+
|
| 555 |
+
@triton_kernel_wrapper_mutation.py_impl(FakeTensorMode)
def triton_kernel_wrapper_mutation_fake_tensor_mode(
    mode, *, kernel_idx, constant_args_idx, grid, kwargs
):
    """FakeTensorMode impl: the mutation HOP returns nothing, so under fake
    tensors this is a no-op (mutations are not modeled here)."""
    with mode:
        return None
|
| 561 |
+
|
| 562 |
+
|
| 563 |
+
def trace_triton_kernel_wrapper(proxy_mode, func_overload, node_args):
    """Record ``func_overload(**node_args)`` as a proxy node in the graph
    being traced, running the op once (with proxy tracing disabled) to get
    the concrete output to associate with the node."""
    with disable_proxy_modes_tracing():
        out = func_overload(**node_args)

    proxy_args = pytree.tree_map(proxy_mode.tracer.unwrap_proxy, node_args)
    # All args are passed as kwargs; positional args tuple is empty.
    out_proxy = proxy_mode.tracer.create_proxy(
        "call_function",
        func_overload,
        (),
        proxy_args,
        name=func_overload.__name__ + "_proxy",
    )
    return track_tensor_tree(out, out_proxy, constant=None, tracer=proxy_mode.tracer)
|
| 576 |
+
|
| 577 |
+
|
| 578 |
+
@triton_kernel_wrapper_mutation.py_impl(ProxyTorchDispatchMode)
def triton_kernel_wrapper_mutation_proxy_torch_dispatch_mode(
    mode, *, kernel_idx, constant_args_idx, grid, kwargs
):
    """ProxyTorchDispatchMode impl: record the mutation HOP in the traced
    graph (or just re-dispatch when tracing is disabled). Always returns
    None — this HOP communicates only via in-place mutation."""
    if mode.enable_tracing:
        trace_triton_kernel_wrapper(
            mode,
            triton_kernel_wrapper_mutation,
            {
                "kernel_idx": kernel_idx,
                "constant_args_idx": constant_args_idx,
                "grid": grid,
                "kwargs": kwargs,
            },
        )
    else:
        triton_kernel_wrapper_mutation(
            kernel_idx=kernel_idx,
            constant_args_idx=constant_args_idx,
            grid=grid,
            kwargs=kwargs,
        )

    return None
|
| 602 |
+
|
| 603 |
+
|
| 604 |
+
@triton_kernel_wrapper_mutation.py_functionalize_impl
def triton_kernel_wrapper_mutation_functionalize(
    ctx, kernel_idx, constant_args_idx, grid, kwargs
):
    """Functionalization impl: rewrite the mutating HOP into the functional
    HOP, then propagate the functional outputs back onto the original
    (wrapped) input tensors via ctx.replace/commit/sync."""
    unwrapped_kwargs = ctx.unwrap_tensors(kwargs)
    kernel = kernel_side_table.get_kernel(kernel_idx)
    constant_args = kernel_side_table.get_constant_args(constant_args_idx)
    # TODO(oulgen): Preexisting bug, if two kernel inputs are views of each
    # other, and one gets mutated in kernel, and later another gets mutated,
    # they are no longer equal. Fix this by graph breaking on this condition
    # earlier in dynamo.
    tensors_to_clone = identify_mutated_tensors(
        kernel, {**unwrapped_kwargs, **constant_args}
    )
    with ctx.redispatch_to_next():
        unwrapped_outputs = triton_kernel_wrapper_functional(
            kernel_idx=kernel_idx,
            constant_args_idx=constant_args_idx,
            grid=grid,
            kwargs=unwrapped_kwargs,
            tensors_to_clone=tensors_to_clone,
        )

    # The functional HOP only returns the (possibly mutated) clones, keyed
    # by the original kwarg names.
    assert set(unwrapped_outputs.keys()).issubset(set(kwargs.keys()))
    for key, output_arg in unwrapped_outputs.items():
        if not isinstance(output_arg, Tensor):
            continue
        input_arg = kwargs[key]
        assert isinstance(input_arg, Tensor)

        ctx.replace(input_arg, output_arg)
        # indicate that above replace is hidden from autograd
        ctx.mark_mutation_hidden_from_autograd(input_arg)
        ctx.commit_update(input_arg)
        ctx.sync(input_arg)
    return None
|
| 640 |
+
|
| 641 |
+
|
| 642 |
+
@triton_kernel_wrapper_functional.py_impl(DispatchKey.CompositeExplicitAutograd)
def triton_kernel_wrapper_functional_dense(
    *, kernel_idx, constant_args_idx, grid, kwargs, tensors_to_clone
):
    """Dense impl of the functional HOP: clone the tensors the kernel may
    mutate, run the mutating HOP on the clones, and return only the clones
    (keyed by kwarg name) — the caller's inputs stay untouched."""
    # TODO(oulgen): For performance reasons, we want to ensure that these
    # `clone_preserve_strides` calls are never executed at runtime
    # (inductor should always optimize them away).
    # Requires https://github.com/pytorch/pytorch/issues/109240
    kwargs = {
        key: (clone_preserve_strides(val) if key in tensors_to_clone else val)
        for key, val in kwargs.items()
    }
    triton_kernel_wrapper_mutation(
        kernel_idx=kernel_idx,
        constant_args_idx=constant_args_idx,
        grid=grid,
        kwargs=kwargs,
    )
    return {key: val for key, val in kwargs.items() if key in tensors_to_clone}
|
| 661 |
+
|
| 662 |
+
|
| 663 |
+
@triton_kernel_wrapper_functional.py_impl(FakeTensorMode)
def triton_kernel_wrapper_functional_fake_tensor_mode(
    mode, *, kernel_idx, constant_args_idx, grid, kwargs, tensors_to_clone
):
    """Fake-tensor impl: the kernel itself is opaque, so just produce fake
    clones for the entries the kernel would mutate."""
    # TODO(oulgen): For performance reasons, we want to ensure that these
    # `clone_preserve_strides` calls are never executed at runtime
    # (inductor should always optimize them away).
    # Requires https://github.com/pytorch/pytorch/issues/109240
    with mode:
        cloned = {}
        for name, value in kwargs.items():
            if name in tensors_to_clone:
                cloned[name] = clone_preserve_strides(value)
        return cloned
|
| 677 |
+
|
| 678 |
+
|
| 679 |
+
@triton_kernel_wrapper_functional.py_impl(ProxyTorchDispatchMode)
def triton_kernel_wrapper_functional_proxy_torch_dispatch_mode(
    mode, *, kernel_idx, constant_args_idx, grid, kwargs, tensors_to_clone
):
    """Proxy-mode impl: record a call_function node for the functional wrapper
    when tracing is enabled, otherwise redispatch with unchanged arguments."""
    if mode.enable_tracing:
        return trace_triton_kernel_wrapper(
            mode,
            triton_kernel_wrapper_functional,
            {
                "kernel_idx": kernel_idx,
                "constant_args_idx": constant_args_idx,
                "grid": grid,
                "kwargs": kwargs,
                "tensors_to_clone": tensors_to_clone,
            },
        )
    else:
        # Bug fix: `constant_args_idx` was previously dropped on this
        # redispatch path, which raises a TypeError because every underlying
        # impl declares it as a required keyword-only argument.
        return triton_kernel_wrapper_functional(
            kernel_idx=kernel_idx,
            constant_args_idx=constant_args_idx,
            grid=grid,
            kwargs=kwargs,
            tensors_to_clone=tensors_to_clone,
        )
|
| 702 |
+
|
| 703 |
+
|
| 704 |
+
@triton_kernel_wrapper_functional.py_functionalize_impl
def triton_kernel_wrapper_functional_functionalize(
    ctx, kernel_idx, constant_args_idx, grid, kwargs, tensors_to_clone
):
    """Functionalization rule for the functional wrapper: unwrap the tensor
    kwargs, redispatch, and re-wrap the outputs. No mutation propagation is
    needed here because the functional variant never mutates its inputs."""
    unwrapped_kwargs = ctx.unwrap_tensors(kwargs)
    with ctx.redispatch_to_next():
        outputs = triton_kernel_wrapper_functional(
            kernel_idx=kernel_idx,
            constant_args_idx=constant_args_idx,
            grid=grid,
            kwargs=unwrapped_kwargs,
            tensors_to_clone=tensors_to_clone,
        )
        return ctx.wrap_tensors(outputs)
|
| 718 |
+
|
| 719 |
+
|
| 720 |
+
# Both wrappers are implemented purely via the py_impl/py_functionalize_impl
# registrations above; every other dispatch key falls through.
triton_kernel_wrapper_mutation.fallthrough(DispatchKey.PythonDispatcher)  # type: ignore[attr-defined]
triton_kernel_wrapper_mutation.fallthrough(DispatchKey.PythonTLSSnapshot)  # type: ignore[attr-defined]
triton_kernel_wrapper_mutation.fallthrough(DispatchKey.ADInplaceOrView)
triton_kernel_wrapper_mutation.fallthrough(DispatchKey.BackendSelect)
triton_kernel_wrapper_mutation.fallthrough(DispatchKey.AutocastCPU)  # type: ignore[attr-defined]
triton_kernel_wrapper_mutation.fallthrough(DispatchKey.AutocastCUDA)  # type: ignore[attr-defined]
triton_kernel_wrapper_mutation.fallthrough(DispatchKey.AutogradCUDA)
triton_kernel_wrapper_mutation.fallthrough(DispatchKey.AutogradCPU)

# Bug fix: AutogradCUDA was registered twice for the functional wrapper; the
# duplicate line is removed so both lists register each key exactly once.
triton_kernel_wrapper_functional.fallthrough(DispatchKey.PythonDispatcher)  # type: ignore[attr-defined]
triton_kernel_wrapper_functional.fallthrough(DispatchKey.PythonTLSSnapshot)  # type: ignore[attr-defined]
triton_kernel_wrapper_functional.fallthrough(DispatchKey.ADInplaceOrView)
triton_kernel_wrapper_functional.fallthrough(DispatchKey.BackendSelect)
triton_kernel_wrapper_functional.fallthrough(DispatchKey.AutocastCPU)  # type: ignore[attr-defined]
triton_kernel_wrapper_functional.fallthrough(DispatchKey.AutocastCUDA)  # type: ignore[attr-defined]
triton_kernel_wrapper_functional.fallthrough(DispatchKey.AutogradCUDA)
triton_kernel_wrapper_functional.fallthrough(DispatchKey.AutogradCPU)
|
valley/lib/python3.10/site-packages/torch/_higher_order_ops/utils.py
ADDED
|
@@ -0,0 +1,212 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import functools
|
| 3 |
+
from contextlib import contextmanager
|
| 4 |
+
from dataclasses import dataclass
|
| 5 |
+
from typing import Any, Callable
|
| 6 |
+
|
| 7 |
+
import torch
|
| 8 |
+
import torch.fx.traceback as fx_traceback
|
| 9 |
+
import torch.utils._pytree as pytree
|
| 10 |
+
from torch._ops import HigherOrderOperator
|
| 11 |
+
from torch.fx.experimental.proxy_tensor import make_fx
|
| 12 |
+
from torch.multiprocessing.reductions import StorageWeakRef
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
# Raised when a HOP branch (cond/map/while_loop body) is detected to mutate or
# alias its inputs, which the functionalization of these ops cannot support.
# NOTE(review): @dataclass on an exception does not populate Exception.args —
# callers should read `.reason`, not rely on str(exc); confirm before changing.
@dataclass
class UnsupportedAliasMutationException(RuntimeError):
    # Human-readable description of the offending branch/op.
    reason: str
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def autograd_not_implemented_inner(
|
| 21 |
+
operator: HigherOrderOperator, delayed_error: bool, *args: Any, **kwargs: Any
|
| 22 |
+
) -> Any:
|
| 23 |
+
"""If autograd is enabled and any of the arguments require grad this will either
|
| 24 |
+
raise an error or return a DelayedError depending on the value of delayed.
|
| 25 |
+
|
| 26 |
+
Args:
|
| 27 |
+
operator: The HigherOrderOperator to call with the *args and **kwargs with
|
| 28 |
+
op_name: The name of the HigherOrderOperator
|
| 29 |
+
delayed_error: If True, return a DelayedError instead of raising an error
|
| 30 |
+
args: The flattened operands to the HigherOrderOperator
|
| 31 |
+
kwargs: The keyword arguments to the HigherOrderOperator
|
| 32 |
+
|
| 33 |
+
Raises:
|
| 34 |
+
RuntimeError: If autograd is enabled and any of the arguments to the HigherOrderOperator
|
| 35 |
+
"""
|
| 36 |
+
with torch._C._AutoDispatchBelowAutograd():
|
| 37 |
+
result = operator(*args, **kwargs)
|
| 38 |
+
flat_operands = pytree.arg_tree_leaves(*args)
|
| 39 |
+
if torch.is_grad_enabled() and any(
|
| 40 |
+
f.requires_grad for f in flat_operands if isinstance(f, torch.Tensor)
|
| 41 |
+
):
|
| 42 |
+
if delayed_error:
|
| 43 |
+
err_fn = torch._C._functions.DelayedError(
|
| 44 |
+
f"Autograd not implemented for {str(operator)}",
|
| 45 |
+
1,
|
| 46 |
+
)
|
| 47 |
+
|
| 48 |
+
def fake_requires_grad(tensor):
|
| 49 |
+
if torch.is_floating_point(tensor) or torch.is_complex(tensor):
|
| 50 |
+
tensor = tensor.detach()
|
| 51 |
+
tensor.requires_grad = True
|
| 52 |
+
return tensor
|
| 53 |
+
|
| 54 |
+
return pytree.tree_map_only(
|
| 55 |
+
torch.Tensor, lambda x: err_fn(fake_requires_grad(x)), result
|
| 56 |
+
)
|
| 57 |
+
else:
|
| 58 |
+
raise RuntimeError(f"Autograd not implemented for {str(operator)}")
|
| 59 |
+
return result
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
def autograd_not_implemented(op: HigherOrderOperator, deferred_error: bool) -> Callable:
|
| 63 |
+
def inner(*args, **kwargs):
|
| 64 |
+
return autograd_not_implemented_inner(op, deferred_error, *args, **kwargs)
|
| 65 |
+
|
| 66 |
+
return inner
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
def _maybe_run_with_interpreter(fn):
|
| 70 |
+
maybe_interpreted_fn = fn
|
| 71 |
+
if isinstance(fn, torch.fx.GraphModule) and fx_traceback.has_preserved_node_meta():
|
| 72 |
+
# Running graph with interpreter is needed for propagating the stack_trace
|
| 73 |
+
def graph_with_interpreter(*args):
|
| 74 |
+
with fx_traceback.preserve_node_meta():
|
| 75 |
+
return torch.fx.Interpreter(fn).run(*args)
|
| 76 |
+
|
| 77 |
+
maybe_interpreted_fn = graph_with_interpreter
|
| 78 |
+
return maybe_interpreted_fn
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
def reenter_make_fx(fn):
    """Wrap *fn* so calling it traces *fn* as a subgraph of the currently
    active make_fx session and returns the resulting GraphModule.

    Must only be called while a make_fx trace is in progress; the wrapper
    asserts this at call time.
    """
    from torch.fx.experimental.proxy_tensor import _CURRENT_MAKE_FX_TRACER

    @functools.wraps(fn)
    def wrapped(*args):
        assert (
            _CURRENT_MAKE_FX_TRACER is not None
        ), "Cannot reenter make_fx when we're not under a make_fx tracing session"
        # Trace with the ambient tracer; GraphModules are replayed through an
        # Interpreter when node-meta preservation is enabled.
        return _CURRENT_MAKE_FX_TRACER.trace_subgraph(
            _maybe_run_with_interpreter(fn), *args
        )

    return wrapped
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
@contextmanager
def _set_compilation_env():
    """Temporarily adjust global tracing flags while compiling a HOP body.

    Saves and restores:
      * ``torch.fx._symbolic_trace._is_fx_tracing_flag``
      * ``torch._dynamo.config.inline_inbuilt_nn_modules``
    """
    _old_is_tracing = torch.fx._symbolic_trace._is_fx_tracing_flag
    _old_is_inlining = torch._dynamo.config.inline_inbuilt_nn_modules
    try:
        # We need to turn off the is_fx_tracing_flag. Remove this flag check from dynamo
        # once we are confident fx tracing works with dynamo.
        torch.fx._symbolic_trace._is_fx_tracing_flag = False

        # TODO(anijain2305, export-team) For non-strict export with module
        # stack info, the codepatch forces the nn module __getattr__ to
        # ProxyAttr __getattr__ downstream. To circumvent the issue for now,
        # skip inlining inbuilt nn modules for cond.
        torch._dynamo.config.inline_inbuilt_nn_modules = False
        yield
    finally:
        # Always restore the previous values, even if the compiled region raised.
        torch.fx._symbolic_trace._is_fx_tracing_flag = _old_is_tracing
        torch._dynamo.config.inline_inbuilt_nn_modules = _old_is_inlining
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
def _has_potential_branch_input_mutation(branch, inputs, pre_dispatch=False):
|
| 117 |
+
"""
|
| 118 |
+
Dispatch-trace the branch with inputs and check if
|
| 119 |
+
producing graph has mutable op on the input. This is
|
| 120 |
+
bit restrictive as the branch must be traceable.
|
| 121 |
+
"""
|
| 122 |
+
try:
|
| 123 |
+
gm = make_fx(branch, pre_dispatch=pre_dispatch)(*inputs)
|
| 124 |
+
except UnsupportedAliasMutationException:
|
| 125 |
+
# this can happen when nested cond_op is
|
| 126 |
+
# functionalized
|
| 127 |
+
return True
|
| 128 |
+
except Exception as e:
|
| 129 |
+
raise e
|
| 130 |
+
|
| 131 |
+
def _detect_input_mutation(gm):
|
| 132 |
+
input_nodes = set()
|
| 133 |
+
for node in gm.graph.nodes:
|
| 134 |
+
if node.op == "placeholder":
|
| 135 |
+
input_nodes.add(node)
|
| 136 |
+
if node.op == "call_function":
|
| 137 |
+
target = node.target
|
| 138 |
+
if (
|
| 139 |
+
isinstance(target, torch._ops.OpOverload)
|
| 140 |
+
and target._schema.is_mutable
|
| 141 |
+
):
|
| 142 |
+
for arg in node.args:
|
| 143 |
+
if arg in input_nodes:
|
| 144 |
+
return True
|
| 145 |
+
|
| 146 |
+
for _, module in gm.named_children():
|
| 147 |
+
if isinstance(module, torch.fx.GraphModule):
|
| 148 |
+
if _detect_input_mutation(module):
|
| 149 |
+
return True
|
| 150 |
+
|
| 151 |
+
return False
|
| 152 |
+
|
| 153 |
+
return _detect_input_mutation(gm)
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
def _has_potential_branch_input_alias(branch, inputs, pre_dispatch=False):
|
| 157 |
+
"""
|
| 158 |
+
Dispatch-trace the branch with inputs and check if
|
| 159 |
+
producing graph has output aliasing the branch input. This is
|
| 160 |
+
bit restrictive as the branch must be traceable.
|
| 161 |
+
"""
|
| 162 |
+
try:
|
| 163 |
+
gm = make_fx(branch, pre_dispatch=pre_dispatch)(*inputs)
|
| 164 |
+
except UnsupportedAliasMutationException:
|
| 165 |
+
# this can happen when nested cond_op is
|
| 166 |
+
# functionalized
|
| 167 |
+
return True
|
| 168 |
+
except Exception as e:
|
| 169 |
+
raise e
|
| 170 |
+
|
| 171 |
+
def _detect_input_alias(gm):
|
| 172 |
+
input_storages = set()
|
| 173 |
+
for node in gm.graph.nodes:
|
| 174 |
+
# We need to check existence of "val" because we reuse the logic here
|
| 175 |
+
# for map operator, where num_mapped_args is a scalar
|
| 176 |
+
# and doesn't have a "val" meta.
|
| 177 |
+
if node.op == "placeholder" and "val" in node.meta:
|
| 178 |
+
input_storages.add(StorageWeakRef(node.meta["val"]._typed_storage()))
|
| 179 |
+
if node.op == "output":
|
| 180 |
+
|
| 181 |
+
def check_alias(out):
|
| 182 |
+
if out is not None and "val" in out.meta:
|
| 183 |
+
out_storage = StorageWeakRef(out.meta["val"]._typed_storage())
|
| 184 |
+
return out_storage in input_storages
|
| 185 |
+
return False
|
| 186 |
+
|
| 187 |
+
if any(pytree.tree_leaves(pytree.tree_map(check_alias, node.args))):
|
| 188 |
+
return True
|
| 189 |
+
|
| 190 |
+
for _, module in gm.named_children():
|
| 191 |
+
if isinstance(module, torch.fx.GraphModule) and _detect_input_alias(module):
|
| 192 |
+
return True
|
| 193 |
+
|
| 194 |
+
return False
|
| 195 |
+
|
| 196 |
+
return _detect_input_alias(gm)
|
| 197 |
+
|
| 198 |
+
|
| 199 |
+
def unique_graph_id(proxy_mode, prefix):
    """Returns a unique name and id for a graph to be added to a proxy_mode tracer"""
    # create_arg has its own self-incrementing naming magic, but since we need
    # the name up-front for register_module, emulate the scheme here: probe
    # "<prefix>_<i>" for increasing i until the attribute is free on the root.
    idx = 0
    while hasattr(proxy_mode.tracer.root, f"{prefix}_{idx}"):
        idx += 1
    return idx, f"{prefix}_{idx}"
|
valley/lib/python3.10/site-packages/torch/_higher_order_ops/while_loop.py
ADDED
|
@@ -0,0 +1,270 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
from typing import Callable, Tuple, Union
|
| 3 |
+
|
| 4 |
+
import torch
|
| 5 |
+
import torch.utils._pytree as pytree
|
| 6 |
+
|
| 7 |
+
from torch._C import DispatchKey
|
| 8 |
+
|
| 9 |
+
from torch._higher_order_ops.utils import (
|
| 10 |
+
_has_potential_branch_input_alias,
|
| 11 |
+
_has_potential_branch_input_mutation,
|
| 12 |
+
_set_compilation_env,
|
| 13 |
+
autograd_not_implemented,
|
| 14 |
+
reenter_make_fx,
|
| 15 |
+
UnsupportedAliasMutationException,
|
| 16 |
+
)
|
| 17 |
+
from torch._ops import HigherOrderOperator
|
| 18 |
+
from torch._subclasses.fake_tensor import FakeTensorMode
|
| 19 |
+
from torch.fx.experimental.proxy_tensor import ProxyTorchDispatchMode, track_tensor_tree
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
class WhileLoopOp(HigherOrderOperator):
    """HigherOrderOperator behind ``torch.while_loop``.

    ``__call__`` validates the argument structure (tuples of tensors/scalars)
    before dispatching to the registered per-key implementations below.
    """

    def __init__(self):
        super().__init__("while_loop")

    def __call__(
        self,
        cond_fn: Callable,
        body_fn: Callable,
        carried_inputs: Tuple[Union[torch.Tensor, int, float, bool], ...],
        additional_inputs: Tuple[Union[torch.Tensor, int, float, bool], ...],
        /,
    ):
        if not isinstance(carried_inputs, tuple):
            raise RuntimeError(
                f"carried_inputs must be a tuple, got {type(carried_inputs)}"
            )
        if not isinstance(additional_inputs, tuple):
            raise RuntimeError(
                f"additional_inputs must be a tuple, got {type(additional_inputs)}"
            )
        if not all(
            isinstance(t, (torch.Tensor, int, float, bool)) for t in carried_inputs
        ):
            raise RuntimeError(
                "carried_inputs must be a tuple of tensors, ints, floats, or bools, got "
                f"{carried_inputs}"
            )

        if not all(
            isinstance(t, (torch.Tensor, int, float, bool)) for t in additional_inputs
        ):
            raise RuntimeError(
                "additional_inputs must be a tuple of tensors, ints, floats, or bools, got "
                f"{additional_inputs}"
            )
        return super().__call__(cond_fn, body_fn, carried_inputs, additional_inputs)


while_loop_op = WhileLoopOp()
# Override while_loop_op.__module__ to "torch.ops.higher_order" so that in the generated
# graph module, while_loop node's target is correctly printed as torch.ops.higher_order.while_loop
while_loop_op.__module__ = "torch.ops.higher_order"
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
def while_loop(cond_fn, body_fn, carried_inputs):
|
| 67 |
+
r"""
|
| 68 |
+
Run body_fn(*carried_inputs) while cond_fn(*carried_inputs) returns a True scalar tensor. Returns the output of body_fn or
|
| 69 |
+
initial carried_inputs.
|
| 70 |
+
|
| 71 |
+
.. warning::
|
| 72 |
+
`torch.while_loop` is a prototype feature in PyTorch. It has limited support for input and output types and
|
| 73 |
+
doesn't support training currently. Please look forward to a more stable implementation in a future version of PyTorch.
|
| 74 |
+
Read more about feature classification at: https://pytorch.org/blog/pytorch-feature-classification-changes/#prototype
|
| 75 |
+
|
| 76 |
+
`while_loop` is a structured control flow operator. It preserves the loop semantic across the torch.compile and torch.export.
|
| 77 |
+
|
| 78 |
+
`while_loop` is equivalent to the following:
|
| 79 |
+
|
| 80 |
+
def while_loop(cond_fn, body_fn, carried_inputs):
|
| 81 |
+
val = carried_inputs
|
| 82 |
+
while cond_fn(*val):
|
| 83 |
+
val = body_fn(*val)
|
| 84 |
+
return val
|
| 85 |
+
|
| 86 |
+
Args:
|
| 87 |
+
cond_fn (Callable): A callable function that returns a boolean Scalar tensor.
|
| 88 |
+
|
| 89 |
+
body_fn (Callable): A callable function that takes the same inputs as `cond_fn` and returns a tuple of tensors
|
| 90 |
+
|
| 91 |
+
carried_inputs (Tuple of possibly nested dict/list/tuple of tensors): A tuple of inputs to cond_fn and body_fn. It's also
|
| 92 |
+
the initial value of states that are carried across iterations.
|
| 93 |
+
|
| 94 |
+
Example:
|
| 95 |
+
|
| 96 |
+
def cond_fn(iter, x):
|
| 97 |
+
return iter.sum() < 10
|
| 98 |
+
|
| 99 |
+
def body_fn(iter, x):
|
| 100 |
+
return iter + 1, x.sin()
|
| 101 |
+
|
| 102 |
+
while_loop(cond_fn, body_fn, (torch.zeros(1), torch.randn(3, 4)))
|
| 103 |
+
|
| 104 |
+
Restrictions:
|
| 105 |
+
|
| 106 |
+
- body_fn must return tensors with the same metadata (e.g.shape, dtype) as inputs.
|
| 107 |
+
|
| 108 |
+
- body_fn and cond_fn must not in-place mutate the carried_inputs. A clone before the mutation is required.
|
| 109 |
+
|
| 110 |
+
- body_fn and cond_fn must not mutate python varialbles (e.g. list/dict) created outside of the body_fn.
|
| 111 |
+
|
| 112 |
+
- body_fn and cond_fn's output cannot aliase any of the inputs. A clone is required.
|
| 113 |
+
|
| 114 |
+
.. warning::
|
| 115 |
+
Temporal Limitations:
|
| 116 |
+
|
| 117 |
+
- 'while_loop' only supports **inference** right now. Autograd will be supported in the future.
|
| 118 |
+
|
| 119 |
+
"""
|
| 120 |
+
|
| 121 |
+
# Currently, additional_inputs is not a user-facing input. It will be automatically set in dynamo.
|
| 122 |
+
# parameters and buffers accessed in cond_fn or body_fn or tensor closures will become additional_inputs.
|
| 123 |
+
additional_inputs: Tuple = tuple()
|
| 124 |
+
if torch.compiler.is_dynamo_compiling():
|
| 125 |
+
return while_loop_op(cond_fn, body_fn, carried_inputs, additional_inputs)
|
| 126 |
+
|
| 127 |
+
def _validate_input(cond_fn, body_fn, carried_inputs):
|
| 128 |
+
if not callable(cond_fn) or not callable(body_fn):
|
| 129 |
+
raise RuntimeError("Expect cond_fn and body_fn to be callbale.")
|
| 130 |
+
|
| 131 |
+
if not isinstance(carried_inputs, (tuple, list)) or pytree.tree_any(
|
| 132 |
+
lambda t: not isinstance(t, torch.Tensor), carried_inputs
|
| 133 |
+
):
|
| 134 |
+
raise RuntimeError(
|
| 135 |
+
"Expect carried_inputs to be a tuple of possibly nested dict/list/tuple that only"
|
| 136 |
+
f"consists of tensor leaves, but got {carried_inputs}."
|
| 137 |
+
)
|
| 138 |
+
|
| 139 |
+
_validate_input(cond_fn, body_fn, carried_inputs)
|
| 140 |
+
|
| 141 |
+
with _set_compilation_env(), torch._dynamo.utils.disable_cache_limit():
|
| 142 |
+
return torch.compile(while_loop_op, backend="eager", fullgraph=True)(
|
| 143 |
+
cond_fn, body_fn, carried_inputs, additional_inputs
|
| 144 |
+
)
|
| 145 |
+
|
| 146 |
+
|
| 147 |
+
@while_loop_op.py_impl(DispatchKey.CompositeExplicitAutograd)
def while_loop_dense(cond_fn, body_fn, carried_inputs, additional_inputs):
    """Eager (dense) implementation: literally run the Python while loop,
    validating the predicate and the body's output structure each iteration."""
    state = carried_inputs

    def _is_boolean_scalar_tensor(pred):
        # A valid predicate is a 0-dim bool tensor.
        return (
            isinstance(pred, torch.Tensor)
            and pred.size() == torch.Size([])
            and pred.dtype == torch.bool
        )

    if not isinstance(carried_inputs, tuple):
        raise RuntimeError(
            f"carried_inputs must be a tuple but got {type(carried_inputs)}"
        )

    while pred := cond_fn(*state, *additional_inputs):
        if not _is_boolean_scalar_tensor(pred):
            raise RuntimeError(
                f"cond_fn must return a boolean scalar tensor but got {pred}"
            )
        out = body_fn(*state, *additional_inputs)
        assert isinstance(
            out, tuple
        ), f"body_fn should return a tuple but got {type(out)}"
        assert len(out) == len(
            carried_inputs
        ), "body_fn should return the same number of elements as carried_inputs"
        state = out
    return state
|
| 177 |
+
|
| 178 |
+
|
| 179 |
+
# while_loop does not support autograd yet: register a kernel that wraps the
# result in a DelayedError which only fires if backward actually reaches it
# (deferred_error=True), so inference-only use keeps working.
while_loop_op.py_impl(DispatchKey.Autograd)(
    autograd_not_implemented(while_loop_op, deferred_error=True)
)
|
| 182 |
+
|
| 183 |
+
|
| 184 |
+
@while_loop_op.py_impl(ProxyTorchDispatchMode)
def while_loop_tracing(mode, cond_fn, body_fn, carried_inputs, additional_inputs):
    """Proxy-mode impl: trace cond_fn/body_fn into subgraph modules and emit a
    single while_loop call_function node into the ambient proxy graph."""

    def _trace_while_loop(
        proxy_mode, while_loop_op, cond_fn, body_fn, carried_inputs, additional_inputs
    ):
        # Trace both branches as standalone subgraphs under the current tracer.
        cond_graph = reenter_make_fx(cond_fn)(*carried_inputs, *additional_inputs)
        body_graph = reenter_make_fx(body_fn)(*carried_inputs, *additional_inputs)

        # Find a free "while_loop_cond_graph_<i>" attribute name on the tracer
        # root (same scheme as utils.unique_graph_id; duplicated here).
        next_name = None
        i = 0
        while not next_name:
            candidate = f"while_loop_cond_graph_{i}"
            if hasattr(proxy_mode.tracer.root, candidate):
                i += 1
            else:
                next_name = candidate
        cond_graph_name = next_name
        # Reuse the same index for the body graph so the pair stays aligned.
        body_graph_name = f"while_loop_body_graph_{i}"
        assert not hasattr(proxy_mode.tracer.root, body_graph_name)

        proxy_mode.tracer.root.register_module(cond_graph_name, cond_graph)
        proxy_mode.tracer.root.register_module(body_graph_name, body_graph)

        args = (cond_graph, body_graph, carried_inputs, additional_inputs)

        proxy_args = pytree.tree_map(proxy_mode.tracer.unwrap_proxy, args)

        out_proxy = proxy_mode.tracer.create_proxy(
            "call_function", while_loop_op, proxy_args, {}, name="while_loop"
        )

        # body_fn return output with the same pytree and tensor meta data as carried_inputs
        # so we could just return the output after one iteration.
        out = body_fn(*carried_inputs, *additional_inputs)
        return track_tensor_tree(
            out, out_proxy, constant=None, tracer=proxy_mode.tracer
        )

    if mode.enable_tracing:
        return _trace_while_loop(
            mode, while_loop_op, cond_fn, body_fn, carried_inputs, additional_inputs
        )
    else:
        return while_loop_op(cond_fn, body_fn, carried_inputs, additional_inputs)
|
| 228 |
+
|
| 229 |
+
|
| 230 |
+
@while_loop_op.py_impl(FakeTensorMode)
def while_loop_fake_tensor_mode(
    mode, cond_fn, body_fn, carried_inputs, additional_inputs
):
    # body_fn is required to return outputs with the same metadata as its
    # inputs, so one fake iteration yields the correct output shapes/dtypes
    # regardless of the actual trip count.
    with mode:
        return body_fn(*carried_inputs, *additional_inputs)
|
| 236 |
+
|
| 237 |
+
|
| 238 |
+
@while_loop_op.py_functionalize_impl
def while_loop_func(ctx, cond_fn, body_fn, carried_inputs, additional_inputs):
    """Functionalization rule: reject branches that mutate or alias their
    inputs (unsupported for while_loop), then redispatch on unwrapped tensors
    with functionalized branch callables."""
    unwrapped_carried_inputs = ctx.unwrap_tensors(carried_inputs)
    unwrapped_additional_inputs = ctx.unwrap_tensors(additional_inputs)
    unwrapped_inputs = unwrapped_carried_inputs + unwrapped_additional_inputs
    with ctx.redispatch_to_next() as m:
        functional_cond_fn = ctx.functionalize(cond_fn)
        functional_body_fn = ctx.functionalize(body_fn)
        # Pre-dispatch functionalization needs the checks below traced with
        # pre_dispatch=True as well.
        pre_dispatch = hasattr(ctx, "mode") and ctx.mode.pre_dispatch
        for fn, fn_name in [
            (functional_cond_fn, "cond_fn"),
            (functional_body_fn, "body_fn"),
        ]:
            if _has_potential_branch_input_mutation(
                fn, unwrapped_inputs, pre_dispatch=pre_dispatch
            ):
                raise UnsupportedAliasMutationException(
                    f"torch.while_loop's {fn_name} might be modifying the input!"
                )

            if _has_potential_branch_input_alias(
                fn, unwrapped_inputs, pre_dispatch=pre_dispatch
            ):
                raise UnsupportedAliasMutationException(
                    f"torch.while_loop's {fn_name} might be aliasing the input!"
                )
        ret = while_loop_op(
            functional_cond_fn,
            functional_body_fn,
            unwrapped_carried_inputs,
            unwrapped_additional_inputs,
        )
        return ctx.wrap_tensors(ret)
|
valley/lib/python3.10/site-packages/torch/_higher_order_ops/wrap.py
ADDED
|
@@ -0,0 +1,184 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import inspect
|
| 3 |
+
import logging
|
| 4 |
+
|
| 5 |
+
import torch
|
| 6 |
+
from torch._ops import HigherOrderOperator
|
| 7 |
+
from torch.utils.checkpoint import checkpoint, uid
|
| 8 |
+
import torch._dynamo.config
|
| 9 |
+
|
| 10 |
+
log = logging.getLogger(__name__)
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
# Used for testing the HigherOrderOperator mechanism
|
| 15 |
+
# Used for testing the HigherOrderOperator mechanism
class Wrap(HigherOrderOperator):
    def __init__(self):
        super().__init__("wrap")

    def __call__(self, func, *args, **kwargs):
        # Dynamo already traces the body of HigherOrderOp beforehand when it
        # encounters this op, so no need to trace into it again here.
        import torch._dynamo  # noqa: F401
        from torch._dynamo import disable

        # disable() keeps dynamo out of the (already traced) body at runtime.
        @disable
        def wrapper():
            result = func(*args, **kwargs)
            return result

        return wrapper()

wrap = Wrap()
|
| 33 |
+
|
| 34 |
+
class WrapWithSetGradEnabled(HigherOrderOperator):
    """Runs the wrapped callable under ``torch.set_grad_enabled(enable_grad)``."""

    def __init__(self):
        super().__init__("wrap_with_set_grad_enabled")

    def __call__(self, enable_grad, wrapped_func, *args, **kwargs):
        # Dynamo already traces the body of HigherOrderOp beforehand when it
        # encounters this op, so no need to trace into it again here.
        import torch._dynamo  # noqa: F401
        from torch._dynamo import disable

        # disable() keeps dynamo out of the (already traced) body at runtime.
        @disable
        def wrapper():
            with torch.set_grad_enabled(enable_grad):
                return wrapped_func(*args, **kwargs)
        return wrapper()

wrap_with_set_grad_enabled = WrapWithSetGradEnabled()
|
| 51 |
+
|
| 52 |
+
class WrapActivationCheckpoint(HigherOrderOperator):
    """
    This operator is used to wrap torch.utils.checkpoint. This avoids
    TorchDynamo to look into saved tensor hooks and directly passes the control
    to AOT Autograd, which is ok with tracing saved tensor hooks. As a result of
    AOT tracing torch.utils.checkpoint code, we have a backward graph with
    recomputed forward nodes.

    However, we might deprecate this operator soon. The difficulty arises in the
    functionalization of rng ops. Today, there are two different
    functionalization of rng ops - one at AOT autograd and other at Inductor.
    And they are difficult to map to each other. The rng states also complicate
    pattern matching in Inductor. Due to the ease of implementation, we are
    currently inclined towards functionalization at Inductor level, which means
    that duplication/recomputation is done as a compiler pass in the
    partitioners. See TagActivationCheckpoint for more information.
    """
    def __init__(self):
        super().__init__("wrap_activation_checkpoint")

    def __call__(self, function, *args, **kwargs):
        # use_reentrant is set to False because this op is going to be traced.
        # And we ensure that AOT Autograd traces through the non reentrant
        # version of checkpointing.
        import torch.fx.traceback as fx_traceback
        from torch.fx import Interpreter
        # NOTE: these two kwargs are forced unconditionally, overriding any
        # values the caller passed in.
        kwargs["use_reentrant"] = False
        kwargs["preserve_rng_state"] = False
        # Using interpreter allows preservation of metadata through torch.compile stack.
        with fx_traceback.preserve_node_meta():
            return checkpoint(Interpreter(function).run, *args, **kwargs)

# Module-level singleton instance.
wrap_activation_checkpoint = WrapActivationCheckpoint()
|
| 85 |
+
|
| 86 |
+
class TagActivationCheckpoint(HigherOrderOperator):
    """
    This operator is supposed to be used only with torch.compile stack. This
    accepts a Fx graph module which needs to be checkpointed. This operator adds
    "recomputable" tag to the nodes of the Fx graph that should be recomputed.

    The goal is to:
    1. Avoid using Dynamo to trace through saved tensor hooks.
    2. For selective checkpointing case, let AOTAutograd trace through
    saved tensor hooks but has special logic with TorchDispatchMode to override
    the usual saved_tensor_hooks fn logic in order to tag the nodes.
    3. Rely on the partitioners to actually duplicate the nodes.
    This sits well in the torch.compile stack, because by the time graph
    reaches partitioner, inductor has already run its functionalization of rng
    ops (by setting fixed seed for each random op, see `replace_random_passes`).
    Therefore, the duplication of nodes, by design, respects the rng states in
    the forward and recomputed forward in backward.
    """

    def __init__(self):
        super().__init__("tag_activation_checkpoint")

    @staticmethod
    def divide_kwargs(kwargs):
        """
        checkpoint fn can have mixed kwargs between checkpointed fn and
        checkpoint fn itself. For example
        >> def gn(x, y, z=None):
        >>     a = torch.matmul(x, y)
        >>     if z is not None:
        >>         return torch.matmul(a, z)
        >>     return a
        >> def fn(x, y, z):
        >>     return torch.cos(checkpoint(gn, x, y, use_reentrant=False, z=z))
        In the above case, z belongs to checkpointed function gn, but
        use_reentrant belongs to the checkpoint function. This function splits
        the kwargs into checkpoint_kwargs and gmod_kwargs (or
        checkpointed_fn_kwargs).
        We do sorting to ensure same graph from run to run for better
        debuggability. It is not required for correctness.
        """
        # Keys belonging to torch.utils.checkpoint itself, derived from its
        # signature (minus the function and the *args/**kwargs catch-alls).
        ckpt_signature = inspect.signature(checkpoint)
        checkpoint_keys = set()
        for name in ckpt_signature.parameters:
            if name in ("function", "args", "kwargs"):
                continue
            checkpoint_keys.add(name)

        # `preserve_rng_state` is not a regular kwarg
        checkpoint_keys.add("preserve_rng_state")

        checkpoint_kwargs = {name: kwargs[name] for name in kwargs.keys() if name in checkpoint_keys}
        gmod_kwargs = {name: kwargs[name] for name in kwargs.keys() if name not in checkpoint_keys}
        return checkpoint_kwargs, gmod_kwargs

    def tag_nodes(self, gmod):
        # Every call node in the graph gets the same per-graph id under the
        # "recompute" meta key, marking it as a candidate for recomputation.
        unique_graph_id = next(uid)
        for node in gmod.graph.nodes:
            if node.op in ("call_function", "call_method", "call_module"):
                node.meta["recompute"] = unique_graph_id
        return gmod

    def __call__(self, gmod, *args, **kwargs):
        import torch.fx.traceback as fx_traceback
        from torch.fx import Interpreter
        if "_checkpoint_context_fn" in gmod.meta:
            # context_fn support is gated behind an experimental config flag.
            assert torch._dynamo.config._experimental_support_context_fn_in_torch_utils_checkpoint, \
                "Passing context_fn to torch.utils.checkpoint is currently not supported under torch.compile"
            log.warning("""
Detected that context_fn is passed to torch.utils.checkpoint under torch.compile.
Please make sure the checkpointed region does not contain in-place ops (e.g. torch.relu_).
""")
            # use_reentrant is set to False because this op is going to be traced.
            # And we ensure that AOT Autograd traces through the non reentrant
            # version of checkpointing.
            kwargs["use_reentrant"] = False
            # preserve_rng_state is set to False because we want to prevent AOTAutograd from tracing through
            # `torch.random.fork_rng` op (which is not supported yet under CUDA).
            # This doesn't mean that we don't preserve RNG state. Instead, we will always preserve RNG state
            # regardless of this flag (by doing RNG functionalization via `replace_random_passes` in Inductor
            # instead of in AOTAutograd).
            kwargs["preserve_rng_state"] = False
            kwargs["context_fn"] = gmod.meta["_checkpoint_context_fn"]
            # We first tag all nodes as "recompute" in this graph, and then we undo the "recompute" tag
            # for specific nodes in _CachingTorchDispatchMode in torch/utils/checkpoint.py.
            gmod = self.tag_nodes(gmod)
            # Using interpreter allows preservation of metadata through torch.compile stack.
            with fx_traceback.preserve_node_meta():
                return checkpoint(Interpreter(gmod).run, *args, **kwargs)
        else:
            gmod = self.tag_nodes(gmod)
            # Using interpreter allows preservation of metadata through torch.compile stack.
            # TODO: We want to use the same `checkpoint(Interpreter(gmod).run, *args, **kwargs)` here
            # as the `context_fn != None` case, but that depends on in-place op support in TorchDispatchMode + torch.compile.
            # (for details on in-place op issue, run `test_compile_selective_checkpoint_inplace_op` unit test)
            with fx_traceback.preserve_node_meta():
                # NOTE(review): kwargs are intentionally not forwarded on this
                # path — only positional args reach the interpreted graph.
                return Interpreter(gmod).run(*args)

# Module-level singleton instance.
tag_activation_checkpoint = TagActivationCheckpoint()
|
valley/lib/python3.10/site-packages/torch/futures/__init__.py
ADDED
|
@@ -0,0 +1,319 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
from __future__ import annotations
|
| 3 |
+
|
| 4 |
+
from typing import cast, Callable, Generic, List, Optional, Type, TypeVar, Union
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
|
| 8 |
+
__all__ = ['Future', 'collect_all', 'wait_all']
|
| 9 |
+
|
| 10 |
+
T = TypeVar("T")
|
| 11 |
+
S = TypeVar("S")
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
# Combines the metaclass of the C++-backed torch._C.Future with the one used
# by typing.Generic so that Future can subclass both without a metaclass
# conflict.
class _PyFutureMeta(type(torch._C.Future), type(Generic)):  # type: ignore[misc, no-redef]
    pass
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class Future(torch._C.Future, Generic[T], metaclass=_PyFutureMeta):
    r"""
    Wrapper around a ``torch._C.Future`` which encapsulates an asynchronous
    execution of a callable, e.g. :meth:`~torch.distributed.rpc.rpc_async`. It
    also exposes a set of APIs to add callback functions and set results.

    .. warning:: GPU support is a beta feature, subject to changes.
    """

    def __init__(self, *, devices: Optional[List[Union[int, str, torch.device]]] = None):
        r"""
        Create an empty unset ``Future``. If the future is intended to hold
        values containing CUDA tensors, (a superset of) their CUDA devices must
        be specified at construction. (This is only supported if
        ``torch.cuda.is_available()`` returns ``True``). This is needed to
        ensure proper CUDA stream synchronization. The child futures, returned
        by the ``then`` method, will inherit these devices.

        Args:
            devices(``List[Union[int, str, torch.device]]``, optional): the set
                of devices on which tensors contained in this future's value are
                allowed to reside and on which callbacks are allowed to operate.
        """
        if devices is None:
            devices = []
        # Normalize every entry (int index, string, or device) to torch.device
        # before handing the list to the C++ base class.
        super().__init__([torch.device(d) for d in devices])

    def done(self) -> bool:
        r"""
        Return ``True`` if this ``Future`` is done. A ``Future`` is done if it
        has a result or an exception.

        If the value contains tensors that reside on GPUs, ``Future.done()``
        will return ``True`` even if the asynchronous kernels that are
        populating those tensors haven't yet completed running on the device,
        because at such stage the result is already usable, provided one
        performs the appropriate synchronizations (see :meth:`wait`).
        """
        return super().done()

    def wait(self) -> T:
        r"""
        Block until the value of this ``Future`` is ready.

        If the value contains tensors that reside on GPUs, then an additional
        synchronization is performed with the kernels (executing on the device)
        which may be asynchronously populating those tensors. Such sync is
        non-blocking, which means that ``wait()`` will insert the necessary
        instructions in the current streams to ensure that further operations
        enqueued on those streams will be properly scheduled after the async
        kernels but, once that is done, ``wait()`` will return, even if those
        kernels are still running. No further synchronization is required when
        accessing and using the values, as long as one doesn't change streams.

        Returns:
            The value held by this ``Future``. If the function (callback or RPC)
            creating the value has thrown an error, this ``wait`` method will
            also throw an error.
        """
        return super().wait()

    def value(self) -> T:
        r"""
        Obtain the value of an already-completed future.

        This method should only be called after a call to :meth:`wait` has
        completed, or inside a callback function passed to :meth:`then`. In
        other cases this ``Future`` may not yet hold a value and calling
        ``value()`` could fail.

        If the value contains tensors that reside on GPUs, then this method will
        *not* perform any additional synchronization. This should be done
        beforehand, separately, through a call to :meth:`wait` (except within
        callbacks, for which it's already being taken care of by :meth:`then`).

        Returns:
            The value held by this ``Future``. If the function (callback or RPC)
            creating the value has thrown an error, this ``value()`` method will
            also throw an error.
        """
        return super().value()

    def then(self, callback: Callable[[Future[T]], S]) -> Future[S]:
        r"""
        Append the given callback function to this ``Future``, which will be run
        when the ``Future`` is completed.  Multiple callbacks can be added to
        the same ``Future``, but the order in which they will be executed cannot
        be guaranteed (to enforce a certain order consider chaining:
        ``fut.then(cb1).then(cb2)``). The callback must take one argument, which
        is the reference to this ``Future``. The callback function can use the
        :meth:`value` method to get the value. Note that if this ``Future`` is
        already completed, the given callback will be run immediately inline.

        If the ``Future``'s value contains tensors that reside on GPUs, the
        callback might be invoked while the async kernels that are populating
        those tensors haven't yet finished executing on the device. However, the
        callback will be invoked with some dedicated streams set as current
        (fetched from a global pool) which will be synchronized with those
        kernels. Hence any operation performed by the callback on these tensors
        will be scheduled on the device after the kernels complete. In other
        words, as long as the callback doesn't switch streams, it can safely
        manipulate the result without any additional synchronization. This is
        similar to the non-blocking behavior of :meth:`wait`.

        Similarly, if the callback returns a value that contains tensors that
        reside on a GPU, it can do so even if the kernels that are producing
        these tensors are still running on the device, as long as the callback
        didn't change streams during its execution. If one wants to change
        streams, one must be careful to re-synchronize them with the original
        streams, that is, those that were current when the callback was invoked.

        Args:
            callback(``Callable``): a ``Callable`` that takes this ``Future`` as
                                    the only argument.

        Returns:
            A new ``Future`` object that holds the return value of the
            ``callback`` and will be marked as completed when the given
            ``callback`` finishes.

        .. note:: Note that if the callback function throws, either
            through the original future being completed with an exception and
            calling ``fut.wait()``, or through other code in the callback, the
            future returned by ``then`` will be marked appropriately with the
            encountered error. However, if this callback later completes
            additional futures, those futures are not marked as completed with
            an error and the user is responsible for handling completion/waiting
            on those futures independently.

        Example::
            >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_FUTURES)
            >>> def callback(fut):
            ...     print(f"RPC return value is {fut.wait()}.")
            >>> fut = torch.futures.Future()
            >>> # The inserted callback will print the return value when
            >>> # receiving the response from "worker1"
            >>> cb_fut = fut.then(callback)
            >>> chain_cb_fut = cb_fut.then(
            ...     lambda x : print(f"Chained cb done. {x.wait()}")
            ... )
            >>> fut.set_result(5)
            RPC return value is 5.
            Chained cb done. None
        """
        # cast() only refines the static type; the C++ base returns the child
        # future unchanged.
        return cast(Future[S], super().then(callback))

    def add_done_callback(self, callback: Callable[[Future[T]], None]) -> None:
        r"""
        Append the given callback function to this ``Future``, which will be run
        when the ``Future`` is completed.  Multiple callbacks can be added to
        the same ``Future``, but the order in which they will be executed cannot
        be guaranteed. The callback must take one argument, which is the
        reference to this ``Future``. The callback function can use the
        :meth:`value` method to get the value. Note that if this ``Future`` is
        already completed, the given callback will be run inline.

        We recommend that you use the :meth:`then` method as it provides a way
        to synchronize after your callback has completed. ``add_done_callback``
        can be cheaper if your callback does not return anything. But both
        :meth:`then` and ``add_done_callback`` use the same callback
        registration API under the hood.

        With respect to GPU tensors, this method behaves in the same way as
        :meth:`then`.

        Args:
            callback(``Future``): a ``Callable`` that takes in one argument,
                which is the reference to this ``Future``.

        .. note:: Note that if the callback function throws, either
            through the original future being completed with an exception and
            calling ``fut.wait()``, or through other code in the callback,
            error handling must be carefully taken care of. For example, if
            this callback later completes additional futures, those futures are
            not marked as completed with an error and the user is responsible
            for handling completion/waiting on those futures independently.

        Example::
            >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_FUTURES)
            >>> def callback(fut):
            ...     print("This will run after the future has finished.")
            ...     print(fut.wait())
            >>> fut = torch.futures.Future()
            >>> fut.add_done_callback(callback)
            >>> fut.set_result(5)
            This will run after the future has finished.
            5
        """
        super().add_done_callback(callback)

    def set_result(self, result: T) -> None:
        r"""
        Set the result for this ``Future``, which will mark this ``Future`` as
        completed and trigger all attached callbacks. Note that a ``Future``
        cannot be marked completed twice.

        If the result contains tensors that reside on GPUs, this method can be
        called even if the asynchronous kernels that are populating those
        tensors haven't yet completed running on the device, provided that the
        streams on which those kernels were enqueued are set as the current ones
        when this method is called. Put simply, it's safe to call this method
        immediately after launching those kernels, without any additional
        synchronization, as long as one doesn't change streams in between. This
        method will record events on all the relevant current streams and will
        use them to ensure proper scheduling for all the consumers of this
        ``Future``.

        Args:
            result (object): the result object of this ``Future``.

        Example::
            >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_FUTURES)
            >>> import threading
            >>> import time
            >>> def slow_set_future(fut, value):
            ...     time.sleep(0.5)
            ...     fut.set_result(value)
            >>> fut = torch.futures.Future()
            >>> t = threading.Thread(
            ...     target=slow_set_future,
            ...     args=(fut, torch.ones(2) * 3)
            ... )
            >>> t.start()
            >>> print(fut.wait())
            tensor([3., 3.])
            >>> t.join()
        """
        super().set_result(result)

    def set_exception(self, result: T) -> None:
        r"""
        Set an exception for this ``Future``, which will mark this ``Future`` as
        completed with an error and trigger all attached callbacks. Note that
        when calling wait()/value() on this ``Future``, the exception set here
        will be raised inline.

        Args:
            result (BaseException): the exception for this ``Future``.

        Example::
            >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_FUTURES)
            >>> fut = torch.futures.Future()
            >>> fut.set_exception(ValueError("foo"))
            >>> fut.wait()
            Traceback (most recent call last):
            ...
            ValueError: foo
        """
        assert isinstance(result, Exception), f"{result} is of type {type(result)}, not an Exception."

        def raise_error(fut_result):
            raise fut_result

        # The exception is stored as a regular result; the unwrap func makes
        # wait()/value() re-raise it instead of returning it.
        super()._set_unwrap_func(raise_error)
        self.set_result(result)  # type: ignore[arg-type]
|
| 273 |
+
|
| 274 |
+
|
| 275 |
+
def collect_all(futures: List[Future]) -> Future[List[Future]]:
    r"""
    Combine the provided :class:`~torch.futures.Future` objects into one
    :class:`~torch.futures.Future` which completes once every sub-future has
    completed.

    Args:
        futures (list): a list of :class:`~torch.futures.Future` objects.

    Returns:
        A :class:`~torch.futures.Future` whose value is the list of the passed
        in Futures.

    Example::
        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_FUTURES)
        >>> fut0 = torch.futures.Future()
        >>> fut1 = torch.futures.Future()
        >>> fut = torch.futures.collect_all([fut0, fut1])
        >>> fut0.set_result(0)
        >>> fut1.set_result(1)
        >>> fut_list = fut.wait()
        >>> print(f"fut0 result = {fut_list[0].wait()}")
        fut0 result = 0
        >>> print(f"fut1 result = {fut_list[1].wait()}")
        fut1 result = 1
    """
    # The casts only refine static types; at runtime the Python Futures are
    # handed to the C++ combinator unchanged.
    native_futures = cast(List[torch._C.Future], futures)
    combined = torch._C._collect_all(native_futures)
    return cast(Future[List[Future]], combined)
|
| 302 |
+
|
| 303 |
+
|
| 304 |
+
def wait_all(futures: List[Future]) -> List:
    r"""
    Block until every provided future is complete and return their values.

    Args:
        futures (list): a list of :class:`~torch.futures.Future` object.

    Returns:
        A list of the completed :class:`~torch.futures.Future` results, in the
        same order as the input. This method will throw an error if ``wait``
        on any :class:`~torch.futures.Future` throws; in that case it exits
        early without waiting for the remaining futures.
    """
    # Combine into a single future first, then unwrap each child's value.
    completed = torch._C._collect_all(cast(List[torch._C.Future], futures)).wait()
    return [child.wait() for child in completed]
|
valley/lib/python3.10/site-packages/torch/futures/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (15.8 kB). View file
|
|
|
valley/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (848 Bytes). View file
|
|
|
valley/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/_functions.cpython-310.pyc
ADDED
|
Binary file (5.74 kB). View file
|
|
|
valley/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/comm.cpython-310.pyc
ADDED
|
Binary file (10.4 kB). View file
|
|
|
valley/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/data_parallel.cpython-310.pyc
ADDED
|
Binary file (10.6 kB). View file
|
|
|
valley/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/distributed.cpython-310.pyc
ADDED
|
Binary file (81.6 kB). View file
|
|
|
valley/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/parallel_apply.cpython-310.pyc
ADDED
|
Binary file (4.08 kB). View file
|
|
|
valley/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/replicate.cpython-310.pyc
ADDED
|
Binary file (5.13 kB). View file
|
|
|
valley/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/scatter_gather.cpython-310.pyc
ADDED
|
Binary file (4.56 kB). View file
|
|
|
valley/lib/python3.10/site-packages/torch/nn/parallel/_functions.py
ADDED
|
@@ -0,0 +1,126 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import warnings
|
| 2 |
+
|
| 3 |
+
import torch
|
| 4 |
+
from . import comm
|
| 5 |
+
from torch.autograd import Function
|
| 6 |
+
from torch._utils import _get_device_index
|
| 7 |
+
from typing import List, Optional
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class Broadcast(Function):
    """Autograd Function that broadcasts GPU tensors to a set of target GPUs.

    Forward returns one flat tuple containing each input replicated on every
    target GPU; backward reduce-adds the gradients back onto the input device.
    """

    @staticmethod
    def forward(ctx, target_gpus, *inputs):
        assert all(i.device.type != 'cpu' for i in inputs), (
            'Broadcast function not implemented for CPU tensors'
        )
        target_gpus = [_get_device_index(x, True) for x in target_gpus]
        ctx.target_gpus = target_gpus
        if len(inputs) == 0:
            return tuple()
        ctx.num_inputs = len(inputs)
        ctx.input_device = inputs[0].get_device()
        outputs = comm.broadcast_coalesced(inputs, ctx.target_gpus)
        # Mark replicas of inputs that don't require grad as non-differentiable
        # so autograd skips them. needs_input_grad[0] is target_gpus, hence the
        # [1:] slice to align with *inputs.
        non_differentiables = []
        for idx, input_requires_grad in enumerate(ctx.needs_input_grad[1:]):
            if not input_requires_grad:
                for output in outputs:
                    non_differentiables.append(output[idx])
        ctx.mark_non_differentiable(*non_differentiables)
        # Flatten [per-gpu][per-input] into a single tuple.
        return tuple([t for tensors in outputs for t in tensors])

    @staticmethod
    def backward(ctx, *grad_outputs):
        # None for the target_gpus argument, then the reduced gradients.
        return (None,) + ReduceAddCoalesced.apply(ctx.input_device, ctx.num_inputs, *grad_outputs)
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
class ReduceAddCoalesced(Function):
    """Autograd Function that sums per-GPU gradient groups onto one device.

    ``grads`` is a flat sequence of ``num_inputs``-sized groups, one group per
    source GPU; forward reduce-adds them onto ``destination``. Backward
    re-broadcasts the incoming gradients to the recorded source GPUs.
    """

    @staticmethod
    def forward(ctx, destination, num_inputs, *grads):
        # One source GPU per group; take it from each group's first tensor.
        ctx.target_gpus = [grads[i].get_device() for i in range(0, len(grads), num_inputs)]

        # Re-chunk the flat grads back into per-GPU groups.
        grads_ = [grads[i:i + num_inputs]
                  for i in range(0, len(grads), num_inputs)]
        return comm.reduce_add_coalesced(grads_, destination)

    @staticmethod
    def backward(ctx, *grad_outputs):
        # None for destination and num_inputs, then the broadcast gradients.
        return (None, None,) + Broadcast.apply(ctx.target_gpus, *grad_outputs)
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
class Gather(Function):
    """Autograd Function that concatenates tensors from multiple GPUs onto one
    target device along ``dim``; backward scatters the gradient back."""

    @staticmethod
    def forward(ctx, target_device, dim, *inputs):
        assert all(i.device.type != 'cpu' for i in inputs), (
            'Gather function not implemented for CPU tensors'
        )
        # target_device may be the literal string 'cpu' or a GPU spec that is
        # normalized to a device index.
        if (target_device == 'cpu'):
            ctx.target_device = 'cpu'
        else:
            target_device = _get_device_index(target_device, True)
            ctx.target_device = target_device
        ctx.dim = dim
        ctx.input_gpus = tuple(i.get_device() for i in inputs)
        # All-scalar inputs cannot be concatenated along dim 0; promote each to
        # a 1-element vector and remember to undo this in backward.
        if all(t.dim() == 0 for t in inputs) and dim == 0:
            inputs = tuple(t.view(1) for t in inputs)
            warnings.warn('Was asked to gather along dimension 0, but all '
                          'input tensors were scalars; will instead unsqueeze '
                          'and return a vector.')
            ctx.unsqueezed_scalar = True
        else:
            ctx.unsqueezed_scalar = False
        ctx.input_sizes = tuple(i.size(ctx.dim) for i in inputs)
        return comm.gather(inputs, ctx.dim, ctx.target_device)

    @staticmethod
    def backward(ctx, grad_output):
        scattered_grads = Scatter.apply(ctx.input_gpus, ctx.input_sizes, ctx.dim, grad_output)
        if ctx.unsqueezed_scalar:
            # Undo the view(1) promotion: return scalar grads again.
            scattered_grads = tuple(g[0] for g in scattered_grads)
        # None for target_device and dim.
        return (None, None) + scattered_grads
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
class Scatter(Function):
    """Autograd Function that splits a tensor into chunks along ``dim`` and
    distributes them to ``target_gpus``; backward gathers the gradients."""

    @staticmethod
    def forward(ctx, target_gpus, chunk_sizes, dim, input):
        target_gpus = [_get_device_index(x, True) for x in target_gpus]
        ctx.dim = dim
        # -1 encodes "input lives on CPU" for backward's Gather target.
        ctx.input_device = input.get_device() if input.device.type != "cpu" else -1
        streams = None
        if torch.cuda.is_available() and ctx.input_device == -1:
            # Perform CPU to GPU copies in a background stream
            streams = [_get_stream(torch.device("cuda", device)) for device in target_gpus]
        outputs = comm.scatter(input, target_gpus, chunk_sizes, ctx.dim, streams)
        # Synchronize with the copy stream
        if streams is not None:
            for i, output in enumerate(outputs):
                with torch.cuda.device(target_gpus[i]):
                    main_stream = torch.cuda.current_stream()
                    # Make the GPU's main stream wait for the background copy,
                    # and keep the output alive w.r.t. the main stream.
                    main_stream.wait_stream(streams[i])
                    output.record_stream(main_stream)
        return outputs

    @staticmethod
    def backward(ctx, *grad_output):
        # None for target_gpus, chunk_sizes and dim.
        return None, None, None, Gather.apply(ctx.input_device, ctx.dim, *grad_output)
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
# background streams used for copying
_streams: Optional[List[Optional[torch.Stream]]] = None

def _get_stream(device: torch.device):
    """Return a lazily created background stream for copies to ``device``.

    CPU devices — and device types with no backend module on ``torch`` —
    have no streams, so ``None`` is returned for them. One stream per
    device index is cached for the lifetime of the process.
    """
    global _streams
    if device.type == "cpu":
        return None
    backend = getattr(torch, device.type, None)
    if backend is None:
        return None
    if _streams is None:
        # One cached slot per device of this backend, filled on demand.
        _streams = [None] * backend.device_count()
    stream = _streams[device.index]
    if stream is None:
        stream = backend.Stream(device.index)
        _streams[device.index] = stream
    return stream
|
valley/lib/python3.10/site-packages/torch/nn/parallel/data_parallel.py
ADDED
|
@@ -0,0 +1,270 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import operator
|
| 3 |
+
import torch
|
| 4 |
+
import warnings
|
| 5 |
+
from itertools import chain
|
| 6 |
+
from typing import Any, Dict, Generic, List, Optional, Sequence, Tuple, TypeVar, Union
|
| 7 |
+
from ..modules import Module
|
| 8 |
+
from .scatter_gather import scatter_kwargs, gather
|
| 9 |
+
from .replicate import replicate
|
| 10 |
+
from .parallel_apply import parallel_apply
|
| 11 |
+
from torch._utils import (
|
| 12 |
+
_get_all_device_indices,
|
| 13 |
+
_get_available_device_type,
|
| 14 |
+
_get_device_index,
|
| 15 |
+
_get_devices_properties
|
| 16 |
+
)
|
| 17 |
+
|
| 18 |
+
__all__ = ['DataParallel', 'data_parallel']
|
| 19 |
+
|
| 20 |
+
def _check_balance(device_ids: Sequence[Union[int, torch.device]]) -> None:
    """Warn when the listed GPUs are badly imbalanced.

    A warning is emitted if the weakest device has less than 75% of the
    memory — or, failing that, of the multiprocessor count — of the
    strongest device in ``device_ids``.
    """
    imbalance_warn = """
    There is an imbalance between your GPUs. You may want to exclude GPU {} which
    has less than 75% of the memory or cores of GPU {}. You can do so by setting
    the device_ids argument to DataParallel, or by setting the CUDA_VISIBLE_DEVICES
    environment variable."""
    device_ids = [_get_device_index(dev, True) for dev in device_ids]
    dev_props = _get_devices_properties(device_ids)

    def warn_imbalance(get_prop):
        # Compare the weakest device against the strongest for one property.
        values = [get_prop(props) for props in dev_props]
        min_pos, min_val = min(enumerate(values), key=operator.itemgetter(1))
        max_pos, max_val = max(enumerate(values), key=operator.itemgetter(1))
        if min_val / max_val < 0.75:
            warnings.warn(imbalance_warn.format(device_ids[min_pos], device_ids[max_pos]))
            return True
        return False

    # Warn at most once: memory imbalance takes precedence over core count.
    if not warn_imbalance(operator.attrgetter("total_memory")):
        warn_imbalance(operator.attrgetter("multi_processor_count"))
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
T = TypeVar("T", bound=Module)
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
class DataParallel(Module, Generic[T]):
    r"""Implements data parallelism at the module level.

    This container parallelizes the application of the given :attr:`module` by
    splitting the input across the specified devices by chunking in the batch
    dimension (other objects will be copied once per device). In the forward
    pass, the module is replicated on each device, and each replica handles a
    portion of the input. During the backwards pass, gradients from each replica
    are summed into the original module.

    The batch size should be larger than the number of GPUs used.

    .. warning::
        It is recommended to use :class:`~torch.nn.parallel.DistributedDataParallel`,
        instead of this class, to do multi-GPU training, even if there is only a single
        node. See: :ref:`cuda-nn-ddp-instead` and :ref:`ddp`.

    Arbitrary positional and keyword inputs are allowed to be passed into
    DataParallel but some types are specially handled. tensors will be
    **scattered** on dim specified (default 0). tuple, list and dict types will
    be shallow copied. The other types will be shared among different threads
    and can be corrupted if written to in the model's forward pass.

    The parallelized :attr:`module` must have its parameters and buffers on
    ``device_ids[0]`` before running this :class:`~torch.nn.DataParallel`
    module.

    .. warning::
        In each forward, :attr:`module` is **replicated** on each device, so any
        updates to the running module in ``forward`` will be lost. For example,
        if :attr:`module` has a counter attribute that is incremented in each
        ``forward``, it will always stay at the initial value because the update
        is done on the replicas which are destroyed after ``forward``. However,
        :class:`~torch.nn.DataParallel` guarantees that the replica on
        ``device[0]`` will have its parameters and buffers sharing storage with
        the base parallelized :attr:`module`. So **in-place** updates to the
        parameters or buffers on ``device[0]`` will be recorded. E.g.,
        :class:`~torch.nn.BatchNorm2d` and :func:`~torch.nn.utils.spectral_norm`
        rely on this behavior to update the buffers.

    .. warning::
        Forward and backward hooks defined on :attr:`module` and its submodules
        will be invoked ``len(device_ids)`` times, each with inputs located on
        a particular device. Particularly, the hooks are only guaranteed to be
        executed in correct order with respect to operations on corresponding
        devices. For example, it is not guaranteed that hooks set via
        :meth:`~torch.nn.Module.register_forward_pre_hook` be executed before
        `all` ``len(device_ids)`` :meth:`~torch.nn.Module.forward` calls, but
        that each such hook be executed before the corresponding
        :meth:`~torch.nn.Module.forward` call of that device.

    .. warning::
        When :attr:`module` returns a scalar (i.e., 0-dimensional tensor) in
        :func:`forward`, this wrapper will return a vector of length equal to
        number of devices used in data parallelism, containing the result from
        each device.

    .. note::
        There is a subtlety in using the
        ``pack sequence -> recurrent network -> unpack sequence`` pattern in a
        :class:`~torch.nn.Module` wrapped in :class:`~torch.nn.DataParallel`.
        See :ref:`pack-rnn-unpack-with-data-parallelism` section in FAQ for
        details.


    Args:
        module (Module): module to be parallelized
        device_ids (list of int or torch.device): CUDA devices (default: all devices)
        output_device (int or torch.device): device location of output (default: device_ids[0])

    Attributes:
        module (Module): the module to be parallelized

    Example::

        >>> # xdoctest: +SKIP
        >>> net = torch.nn.DataParallel(model, device_ids=[0, 1, 2])
        >>> output = net(input_var)  # input_var can be on any device, including CPU
    """

    # TODO: update notes/cuda.rst when this class handles 8+ GPUs well

    def __init__(
        self,
        module: T,
        device_ids: Optional[Sequence[Union[int, torch.device]]] = None,
        output_device: Optional[Union[int, torch.device]] = None,
        dim: int = 0,
    ) -> None:
        super().__init__()
        torch._C._log_api_usage_once("torch.nn.parallel.DataParallel")
        device_type = _get_available_device_type()
        if device_type is None:
            # No accelerator available: keep the module as-is and leave
            # device_ids empty so forward() runs the module directly.
            self.module = module
            self.device_ids = []
            return

        if device_ids is None:
            device_ids = _get_all_device_indices()

        if device_ids is None:
            raise RuntimeError("no available devices were found")

        if output_device is None:
            # Outputs are gathered onto the first listed device by default.
            output_device = device_ids[0]

        self.dim = dim
        self.module = module
        self.device_ids = [_get_device_index(x, True) for x in device_ids]
        self.output_device = _get_device_index(output_device, True)
        self.src_device_obj = torch.device(device_type, self.device_ids[0])

        if device_type == "cuda":
            # Warn if one GPU is much weaker than another (CUDA only).
            _check_balance(self.device_ids)

        if len(self.device_ids) == 1:
            # Single device: move the module there once, up front.
            self.module.to(self.src_device_obj)

    def forward(self, *inputs: Any, **kwargs: Any) -> Any:
        """Scatter inputs, replicate the module, run the replicas, gather outputs."""
        with torch.autograd.profiler.record_function("DataParallel.forward"):
            if not self.device_ids:
                # Constructed without an accelerator: run the module directly.
                return self.module(*inputs, **kwargs)

            # All parameters and buffers must already be on device_ids[0]
            # (see the class docstring).
            for t in chain(self.module.parameters(), self.module.buffers()):
                if t.device != self.src_device_obj:
                    raise RuntimeError("module must have its parameters and buffers "
                                       f"on device {self.src_device_obj} (device_ids[0]) but found one of "
                                       f"them on device: {t.device}")

            inputs, module_kwargs = self.scatter(inputs, kwargs, self.device_ids)
            # for forward function without any inputs, empty list and dict will be created
            # so the module can be executed on one device which is the first one in device_ids
            if not inputs and not module_kwargs:
                inputs = ((),)
                module_kwargs = ({},)

            if len(self.device_ids) == 1:
                # One device: no replication needed, call the module directly.
                return self.module(*inputs[0], **module_kwargs[0])
            # Only replicate onto as many devices as there are input chunks.
            replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
            outputs = self.parallel_apply(replicas, inputs, module_kwargs)
            return self.gather(outputs, self.output_device)

    def replicate(self, module: T, device_ids: Sequence[Union[int, torch.device]]) -> List[T]:
        """Create one replica of ``module`` per device in ``device_ids``."""
        # Third argument forwards ``not torch.is_grad_enabled()`` — its exact
        # semantics live in replicate(); presumably it requests non-autograd
        # copies when grads are off (TODO confirm against replicate()).
        return replicate(module, device_ids, not torch.is_grad_enabled())

    def scatter(
        self,
        inputs: Tuple[Any, ...],
        kwargs: Optional[Dict[str, Any]],
        device_ids: Sequence[Union[int, torch.device]],
    ) -> Any:
        """Split ``inputs`` and ``kwargs`` across ``device_ids`` along ``self.dim``."""
        return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)

    def parallel_apply(self, replicas: Sequence[T], inputs: Sequence[Any], kwargs: Any) -> List[Any]:
        """Apply each replica to its corresponding chunk of the input."""
        return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)])

    def gather(self, outputs: Any, output_device: Union[int, torch.device]) -> Any:
        """Concatenate per-device outputs onto ``output_device`` along ``self.dim``."""
        return gather(outputs, output_device, dim=self.dim)
|
| 205 |
+
|
| 206 |
+
|
| 207 |
+
def data_parallel(
    module: Module,
    inputs: Any,
    device_ids: Optional[Sequence[Union[int, torch.device]]] = None,
    output_device: Optional[Union[int, torch.device]] = None,
    dim: int = 0,
    module_kwargs: Optional[Any] = None,
) -> torch.Tensor:
    r"""Run ``module`` replicated over ``device_ids`` — the functional form of DataParallel.

    The inputs are scattered along ``dim``, the module is replicated onto each
    listed device, the replicas are applied in parallel, and the per-device
    outputs are gathered onto ``output_device``.

    Args:
        module (Module): the module to evaluate in parallel
        inputs (Tensor): inputs to the module
        device_ids (list of int or torch.device): GPU ids on which to replicate module
        output_device (list of int or torch.device): GPU location of the output; use -1
            for the CPU (default: ``device_ids[0]``)

    Returns:
        a Tensor containing the result of ``module(input)``, located on
        ``output_device``.
    """
    if not isinstance(inputs, tuple):
        inputs = () if inputs is None else (inputs,)

    device_type = _get_available_device_type()
    if device_type is None:
        raise RuntimeError("device type could not be determined")

    if device_ids is None:
        device_ids = _get_all_device_indices()
    if device_ids is None:
        raise RuntimeError("no available devices were found")

    if output_device is None:
        output_device = device_ids[0]

    device_ids = [_get_device_index(dev, True) for dev in device_ids]
    output_device = _get_device_index(output_device, True)
    src_device_obj = torch.device(device_type, device_ids[0])

    # Every parameter and buffer must already live on the source device.
    for tensor in chain(module.parameters(), module.buffers()):
        if tensor.device != src_device_obj:
            raise RuntimeError("module must have its parameters and buffers "
                               f"on device {src_device_obj} (device_ids[0]) but found one of "
                               f"them on device: {tensor.device}")

    inputs, module_kwargs = scatter_kwargs(inputs, module_kwargs, device_ids, dim)
    # for module without any inputs, empty list and dict will be created
    # so the module can be executed on one device which is the first one in device_ids
    if not inputs and not module_kwargs:
        inputs = ((),)
        module_kwargs = ({},)

    assert module_kwargs is not None

    if len(device_ids) == 1:
        # Single device: skip replication entirely.
        return module(*inputs[0], **module_kwargs[0])

    used_device_ids = device_ids[:len(inputs)]
    replicas = replicate(module, used_device_ids)
    outputs = parallel_apply(replicas, inputs, module_kwargs, used_device_ids)
    return gather(outputs, output_device, dim)
|
valley/lib/python3.10/site-packages/torch/nn/parallel/parallel_apply.py
ADDED
|
@@ -0,0 +1,111 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import threading
|
| 2 |
+
import torch
|
| 3 |
+
from typing import Any, Dict, List, Optional, Sequence, Tuple, Union, cast
|
| 4 |
+
from ..modules import Module
|
| 5 |
+
from torch.cuda._utils import _get_device_index
|
| 6 |
+
from torch._utils import ExceptionWrapper
|
| 7 |
+
|
| 8 |
+
__all__ = ['get_a_var', 'parallel_apply']
|
| 9 |
+
|
| 10 |
+
def get_a_var(
    obj: Union[torch.Tensor, List[Any], Tuple[Any, ...], Dict[Any, Any]]
) -> Optional[torch.Tensor]:
    """Return the first tensor found in ``obj`` by depth-first search, else ``None``.

    Lists and tuples are searched element by element; dicts are searched
    through their ``(key, value)`` pairs. Any other type yields ``None``.
    """
    if isinstance(obj, torch.Tensor):
        return obj

    if isinstance(obj, (list, tuple)):
        candidates = obj
    elif isinstance(obj, dict):
        candidates = obj.items()
    else:
        return None

    for element in candidates:
        found = get_a_var(element)
        if isinstance(found, torch.Tensor):
            return found
    return None
|
| 23 |
+
|
| 24 |
+
def parallel_apply(
    modules: Sequence[Module],
    inputs: Sequence[Any],
    kwargs_tup: Optional[Sequence[Dict[str, Any]]] = None,
    devices: Optional[Sequence[Optional[Union[int, torch.device]]]] = None,
) -> List[Any]:
    r"""Apply each `module` in :attr:`modules` in parallel on each of :attr:`devices`.

    Args:
        modules (Module): modules to be parallelized
        inputs (tensor): inputs to the modules
        devices (list of int or torch.device): CUDA devices

    :attr:`modules`, :attr:`inputs`, :attr:`kwargs_tup` (if given), and
    :attr:`devices` (if given) should all have same length. Moreover, each
    element of :attr:`inputs` can either be a single object as the only argument
    to a module, or a collection of positional arguments.

    Returns a list with one output per module, in input order. If any worker
    raised, its exception is re-raised here via ``ExceptionWrapper.reraise()``.
    """
    assert len(modules) == len(inputs), f'The number of modules {len(modules)} is not equal to the number of inputs {len(inputs)}'
    if kwargs_tup is not None:
        assert len(modules) == len(kwargs_tup)
    else:
        # No kwargs supplied: one shared empty dict per module (only ever
        # unpacked, never written to, inside the workers).
        kwargs_tup = (cast(Dict[str, Any], {}),) * len(modules)
    if devices is not None:
        assert len(modules) == len(devices)
    else:
        devices = [None] * len(modules)
    devices = [_get_device_index(x, True) for x in devices]
    streams = [torch.cuda.current_stream(x) for x in devices]
    # `results` is written concurrently by worker threads; `lock` guards it.
    lock = threading.Lock()
    results = {}
    # Capture the caller's grad/autocast state so each worker thread can
    # re-apply it before running its replica.
    grad_enabled, autocast_enabled = torch.is_grad_enabled(), torch.is_autocast_enabled()

    def _worker(
        i: int,
        module: Module,
        input: Any,
        kwargs: Dict[str, Any],
        device: Optional[Union[int, torch.device]] = None,
        stream: Optional[torch.cuda.Stream] = None,
    ) -> None:
        # Run one replica; store its output — or an ExceptionWrapper on
        # failure — into results[i].
        torch.set_grad_enabled(grad_enabled)
        if device is None:
            # No explicit device: infer it from the first tensor in the input.
            t = get_a_var(input)
            if t is None:
                with lock:
                    results[i] = ExceptionWrapper(
                        where=f"in replica {i}, no device was provided and no tensor input was found; "
                        "device cannot be resolved")
                return
            device = t.get_device()
        if stream is None:
            stream = torch.cuda.current_stream(device)
        try:
            with torch.cuda.device(device), torch.cuda.stream(
                stream
            ), torch.amp.autocast("cuda", enabled=autocast_enabled):
                # this also avoids accidental slicing of `input` if it is a Tensor
                if not isinstance(input, (list, tuple)):
                    input = (input,)
                output = module(*input, **kwargs)
            with lock:
                results[i] = output
        except Exception:
            # Capture the exception so the caller can re-raise it after join().
            with lock:
                results[i] = ExceptionWrapper(
                    where=f"in replica {i} on device {device}")

    if len(modules) > 1:
        # One worker thread per replica.
        threads = [threading.Thread(target=_worker,
                                    args=(i, module, input, kwargs, device, stream))
                   for i, (module, input, kwargs, device, stream) in
                   enumerate(zip(modules, inputs, kwargs_tup, devices, streams))]

        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
    else:
        # Single replica: run inline on the current thread, no threading cost.
        _worker(0, modules[0], inputs[0], kwargs_tup[0], devices[0], streams[0])

    outputs = []
    for i in range(len(inputs)):
        output = results[i]
        if isinstance(output, ExceptionWrapper):
            # Re-raise the worker's exception in the caller.
            output.reraise()
        outputs.append(output)
    return outputs
|
valley/lib/python3.10/site-packages/torch/testing/__init__.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from torch._C import FileCheck as FileCheck
|
| 2 |
+
from . import _utils
|
| 3 |
+
from ._comparison import assert_allclose, assert_close as assert_close
|
| 4 |
+
from ._creation import make_tensor as make_tensor
|
valley/lib/python3.10/site-packages/torch/testing/_comparison.py
ADDED
|
@@ -0,0 +1,1574 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: allow-untyped-defs
|
| 2 |
+
import abc
|
| 3 |
+
import cmath
|
| 4 |
+
import collections.abc
|
| 5 |
+
import contextlib
|
| 6 |
+
from typing import (
|
| 7 |
+
Any,
|
| 8 |
+
Callable,
|
| 9 |
+
Collection,
|
| 10 |
+
Dict,
|
| 11 |
+
List,
|
| 12 |
+
NoReturn,
|
| 13 |
+
Optional,
|
| 14 |
+
Sequence,
|
| 15 |
+
Tuple,
|
| 16 |
+
Type,
|
| 17 |
+
Union,
|
| 18 |
+
)
|
| 19 |
+
from typing_extensions import deprecated
|
| 20 |
+
|
| 21 |
+
import torch
|
| 22 |
+
|
| 23 |
+
# NumPy is an optional dependency; record its availability so comparison
# helpers can branch on it instead of importing at call time.
try:
    import numpy as np

    NUMPY_AVAILABLE = True
except ModuleNotFoundError:
    NUMPY_AVAILABLE = False
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
class ErrorMeta(Exception):
    """Internal testing exception that carries metadata for a deferred user-facing error."""

    def __init__(
        self, type: Type[Exception], msg: str, *, id: Tuple[Any, ...] = ()
    ) -> None:
        # The base message is only ever seen if an ErrorMeta escapes instead
        # of being converted with to_error().
        super().__init__(
            "If you are a user and see this message during normal operation "
            "please file an issue at https://github.com/pytorch/pytorch/issues. "
            "If you are a developer and working on the comparison functions, please `raise ErrorMeta.to_error()` "
            "for user facing errors."
        )
        self.type = type
        self.msg = msg
        self.id = id

    def to_error(
        self, msg: Optional[Union[str, Callable[[str], str]]] = None
    ) -> Exception:
        """Materialize the stored metadata into an instance of ``self.type``.

        ``msg`` may be a full replacement string, a callable that
        post-processes the generated message, or ``None`` to use the
        generated message unchanged.
        """
        if isinstance(msg, str):
            return self.type(msg)

        generated_msg = self.msg
        if self.id:
            # Render the id trail as chained subscripts, e.g. ['key'][0].
            item_trail = "".join(f"[{item!r}]" for item in self.id)
            generated_msg += f"\n\nThe failure occurred for item {item_trail}"

        if callable(msg):
            generated_msg = msg(generated_msg)
        return self.type(generated_msg)
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
# Some analysis of tolerance by logging tests from test_torch.py can be found in
|
| 61 |
+
# https://github.com/pytorch/pytorch/pull/32538.
|
| 62 |
+
# {dtype: (rtol, atol)}
|
| 63 |
+
_DTYPE_PRECISIONS = {
|
| 64 |
+
torch.float16: (0.001, 1e-5),
|
| 65 |
+
torch.bfloat16: (0.016, 1e-5),
|
| 66 |
+
torch.float32: (1.3e-6, 1e-5),
|
| 67 |
+
torch.float64: (1e-7, 1e-7),
|
| 68 |
+
torch.complex32: (0.001, 1e-5),
|
| 69 |
+
torch.complex64: (1.3e-6, 1e-5),
|
| 70 |
+
torch.complex128: (1e-7, 1e-7),
|
| 71 |
+
}
|
| 72 |
+
# The default tolerances of torch.float32 are used for quantized dtypes, because quantized tensors are compared in
|
| 73 |
+
# their dequantized and floating point representation. For more details see `TensorLikePair._compare_quantized_values`
|
| 74 |
+
_DTYPE_PRECISIONS.update(
|
| 75 |
+
dict.fromkeys(
|
| 76 |
+
(torch.quint8, torch.quint2x4, torch.quint4x2, torch.qint8, torch.qint32),
|
| 77 |
+
_DTYPE_PRECISIONS[torch.float32],
|
| 78 |
+
)
|
| 79 |
+
)
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
def default_tolerances(
    *inputs: Union[torch.Tensor, torch.dtype],
    dtype_precisions: Optional[Dict[torch.dtype, Tuple[float, float]]] = None,
) -> Tuple[float, float]:
    """Returns the default absolute and relative testing tolerances for a set of inputs based on the dtype.

    See :func:`assert_close` for a table of the default tolerance for each dtype.

    Returns:
        (Tuple[float, float]): Loosest tolerances of all input dtypes.
    """
    dtypes = []
    for inp in inputs:
        if isinstance(inp, torch.Tensor):
            dtypes.append(inp.dtype)
        elif isinstance(inp, torch.dtype):
            dtypes.append(inp)
        else:
            raise TypeError(
                f"Expected a torch.Tensor or a torch.dtype, but got {type(inp)} instead."
            )
    # An explicitly passed (non-empty) mapping wins over the module default.
    precisions = dtype_precisions or _DTYPE_PRECISIONS
    # Unknown dtypes fall back to exact comparison, i.e. zero tolerances.
    per_dtype = [precisions.get(dtype, (0.0, 0.0)) for dtype in dtypes]
    rtols, atols = zip(*per_dtype)
    return max(rtols), max(atols)
|
| 106 |
+
|
| 107 |
+
|
| 108 |
+
def get_tolerances(
    *inputs: Union[torch.Tensor, torch.dtype],
    rtol: Optional[float],
    atol: Optional[float],
    id: Tuple[Any, ...] = (),
) -> Tuple[float, float]:
    """Gets absolute and relative to be used for numeric comparisons.

    If both ``rtol`` and ``atol`` are specified, this is a no-op. If both are not specified, the return value of
    :func:`default_tolerances` is used.

    Raises:
        ErrorMeta: With :class:`ValueError`, if only ``rtol`` or ``atol`` is specified.

    Returns:
        (Tuple[float, float]): Valid absolute and relative tolerances.
    """
    if rtol is not None and atol is not None:
        return rtol, atol
    if rtol is None and atol is None:
        return default_tolerances(*inputs)
    # Exactly one of the two was given. We require both tolerances to be
    # omitted or specified, because specifying only one might lead to
    # surprising results: imagine setting atol=0.0 and the tensors still
    # matching because rtol>0.0.
    raise ErrorMeta(
        ValueError,
        f"Both 'rtol' and 'atol' must be either specified or omitted, "
        f"but got no {'rtol' if rtol is None else 'atol'}.",
        id=id,
    )
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
def _make_mismatch_msg(
|
| 141 |
+
*,
|
| 142 |
+
default_identifier: str,
|
| 143 |
+
identifier: Optional[Union[str, Callable[[str], str]]] = None,
|
| 144 |
+
extra: Optional[str] = None,
|
| 145 |
+
abs_diff: float,
|
| 146 |
+
abs_diff_idx: Optional[Union[int, Tuple[int, ...]]] = None,
|
| 147 |
+
atol: float,
|
| 148 |
+
rel_diff: float,
|
| 149 |
+
rel_diff_idx: Optional[Union[int, Tuple[int, ...]]] = None,
|
| 150 |
+
rtol: float,
|
| 151 |
+
) -> str:
|
| 152 |
+
"""Makes a mismatch error message for numeric values.
|
| 153 |
+
|
| 154 |
+
Args:
|
| 155 |
+
default_identifier (str): Default description of the compared values, e.g. "Tensor-likes".
|
| 156 |
+
identifier (Optional[Union[str, Callable[[str], str]]]): Optional identifier that overrides
|
| 157 |
+
``default_identifier``. Can be passed as callable in which case it will be called with
|
| 158 |
+
``default_identifier`` to create the description at runtime.
|
| 159 |
+
extra (Optional[str]): Extra information to be placed after the message header and the mismatch statistics.
|
| 160 |
+
abs_diff (float): Absolute difference.
|
| 161 |
+
abs_diff_idx (Optional[Union[int, Tuple[int, ...]]]): Optional index of the absolute difference.
|
| 162 |
+
atol (float): Allowed absolute tolerance. Will only be added to mismatch statistics if it or ``rtol`` are
|
| 163 |
+
``> 0``.
|
| 164 |
+
rel_diff (float): Relative difference.
|
| 165 |
+
rel_diff_idx (Optional[Union[int, Tuple[int, ...]]]): Optional index of the relative difference.
|
| 166 |
+
rtol (float): Allowed relative tolerance. Will only be added to mismatch statistics if it or ``atol`` are
|
| 167 |
+
``> 0``.
|
| 168 |
+
"""
|
| 169 |
+
equality = rtol == 0 and atol == 0
|
| 170 |
+
|
| 171 |
+
def make_diff_msg(
|
| 172 |
+
*,
|
| 173 |
+
type: str,
|
| 174 |
+
diff: float,
|
| 175 |
+
idx: Optional[Union[int, Tuple[int, ...]]],
|
| 176 |
+
tol: float,
|
| 177 |
+
) -> str:
|
| 178 |
+
if idx is None:
|
| 179 |
+
msg = f"{type.title()} difference: {diff}"
|
| 180 |
+
else:
|
| 181 |
+
msg = f"Greatest {type} difference: {diff} at index {idx}"
|
| 182 |
+
if not equality:
|
| 183 |
+
msg += f" (up to {tol} allowed)"
|
| 184 |
+
return msg + "\n"
|
| 185 |
+
|
| 186 |
+
if identifier is None:
|
| 187 |
+
identifier = default_identifier
|
| 188 |
+
elif callable(identifier):
|
| 189 |
+
identifier = identifier(default_identifier)
|
| 190 |
+
|
| 191 |
+
msg = f"{identifier} are not {'equal' if equality else 'close'}!\n\n"
|
| 192 |
+
|
| 193 |
+
if extra:
|
| 194 |
+
msg += f"{extra.strip()}\n"
|
| 195 |
+
|
| 196 |
+
msg += make_diff_msg(type="absolute", diff=abs_diff, idx=abs_diff_idx, tol=atol)
|
| 197 |
+
msg += make_diff_msg(type="relative", diff=rel_diff, idx=rel_diff_idx, tol=rtol)
|
| 198 |
+
|
| 199 |
+
return msg.strip()
|
| 200 |
+
|
| 201 |
+
|
| 202 |
+
def make_scalar_mismatch_msg(
    actual: Union[bool, int, float, complex],
    expected: Union[bool, int, float, complex],
    *,
    rtol: float,
    atol: float,
    identifier: Optional[Union[str, Callable[[str], str]]] = None,
) -> str:
    """Makes a mismatch error message for scalars.

    Args:
        actual (Union[bool, int, float, complex]): Actual scalar.
        expected (Union[bool, int, float, complex]): Expected scalar.
        rtol (float): Relative tolerance.
        atol (float): Absolute tolerance.
        identifier (Optional[Union[str, Callable[[str], str]]]): Optional description for the scalars. Can be passed
            as callable in which case it will be called by the default value to create the description at runtime.
            Defaults to "Scalars".
    """
    abs_diff = abs(actual - expected)
    # Relative difference is undefined for an expected value of zero.
    rel_diff = abs_diff / abs(expected) if expected != 0 else float("inf")
    return _make_mismatch_msg(
        default_identifier="Scalars",
        identifier=identifier,
        extra=f"Expected {expected} but got {actual}.",
        abs_diff=abs_diff,
        atol=atol,
        rel_diff=rel_diff,
        rtol=rtol,
    )
|
| 232 |
+
|
| 233 |
+
|
| 234 |
+
def make_tensor_mismatch_msg(
    actual: torch.Tensor,
    expected: torch.Tensor,
    matches: torch.Tensor,
    *,
    rtol: float,
    atol: float,
    identifier: Optional[Union[str, Callable[[str], str]]] = None,
):
    """Makes a mismatch error message for tensors.

    Args:
        actual (torch.Tensor): Actual tensor.
        expected (torch.Tensor): Expected tensor.
        matches (torch.Tensor): Boolean mask of the same shape as ``actual`` and ``expected`` that indicates the
            location of matches.
        rtol (float): Relative tolerance.
        atol (float): Absolute tolerance.
        identifier (Optional[Union[str, Callable[[str], str]]]): Optional description for the tensors. Can be passed
            as callable in which case it will be called by the default value to create the description at runtime.
            Defaults to "Tensor-likes".
    """

    def unravel_flat_index(flat_index: int) -> Tuple[int, ...]:
        # Converts a flat index back into a multi-dimensional index into `matches`.
        if not matches.shape:
            return ()

        coords = []
        remaining = flat_index
        for size in reversed(matches.shape):
            remaining, coord = divmod(remaining, size)
            coords.append(coord)

        return tuple(reversed(coords))

    number_of_elements = matches.numel()
    total_mismatches = number_of_elements - int(torch.sum(matches))
    extra = (
        f"Mismatched elements: {total_mismatches} / {number_of_elements} "
        f"({total_mismatches / number_of_elements:.1%})"
    )

    actual_flat, expected_flat, matches_flat = (
        tensor.flatten() for tensor in (actual, expected, matches)
    )

    if not actual.dtype.is_floating_point and not actual.dtype.is_complex:
        # TODO: Instead of always upcasting to int64, it would be sufficient to cast to the next higher dtype to avoid
        # overflow
        actual_flat = actual_flat.to(torch.int64)
        expected_flat = expected_flat.to(torch.int64)

    abs_diff = torch.abs(actual_flat - expected_flat)
    # Zero out matching entries so only mismatches contribute to the maximum
    abs_diff[matches_flat] = 0
    max_abs_diff, max_abs_diff_flat_idx = torch.max(abs_diff, 0)

    # NOTE: computed from the already-zeroed abs_diff; the explicit zeroing below
    # additionally clears entries where the division produced NaN (expected == 0).
    rel_diff = abs_diff / torch.abs(expected_flat)
    rel_diff[matches_flat] = 0
    max_rel_diff, max_rel_diff_flat_idx = torch.max(rel_diff, 0)

    return _make_mismatch_msg(
        default_identifier="Tensor-likes",
        identifier=identifier,
        extra=extra,
        abs_diff=max_abs_diff.item(),
        abs_diff_idx=unravel_flat_index(int(max_abs_diff_flat_idx)),
        atol=atol,
        rel_diff=max_rel_diff.item(),
        rel_diff_idx=unravel_flat_index(int(max_rel_diff_flat_idx)),
        rtol=rtol,
    )
|
| 306 |
+
|
| 307 |
+
|
| 308 |
+
class UnsupportedInputs(Exception):  # noqa: B903
    """Raised during construction of a :class:`Pair` to signal that it cannot handle the given inputs."""
|
| 310 |
+
|
| 311 |
+
|
| 312 |
+
class Pair(abc.ABC):
    """ABC for all comparison pairs to be used in conjunction with :func:`assert_equal`.

    Each subclass needs to overwrite :meth:`Pair.compare` that performs the actual comparison.

    Each pair receives **all** options, so select the ones applicable for the subclass and forward the rest to the
    super class. Raising an :class:`UnsupportedInputs` during constructions indicates that the pair is not able to
    handle the inputs and the next pair type will be tried.

    All other errors should be raised as :class:`ErrorMeta`. After the instantiation, :meth:`Pair._make_error_meta` can
    be used to automatically handle overwriting the message with a user supplied one and id handling.
    """

    def __init__(
        self,
        actual: Any,
        expected: Any,
        *,
        id: Tuple[Any, ...] = (),
        **unknown_parameters: Any,
    ) -> None:
        self.actual = actual
        self.expected = expected
        # Trail of container keys/indices locating this pair in the original input.
        self.id = id
        # Options not consumed by this pair type; kept around for debugging/repr.
        self._unknown_parameters = unknown_parameters

    @staticmethod
    def _inputs_not_supported() -> NoReturn:
        """Signals that this pair type cannot handle the inputs, so the next pair type will be tried."""
        raise UnsupportedInputs

    @staticmethod
    def _check_inputs_isinstance(*inputs: Any, cls: Union[Type, Tuple[Type, ...]]):
        """Checks if all inputs are instances of a given class and raise :class:`UnsupportedInputs` otherwise."""
        if not all(isinstance(input, cls) for input in inputs):
            Pair._inputs_not_supported()

    def _fail(
        self, type: Type[Exception], msg: str, *, id: Tuple[Any, ...] = ()
    ) -> NoReturn:
        """Raises an :class:`ErrorMeta` from a given exception type and message and the stored id.

        .. warning::

            If you use this before the ``super().__init__(...)`` call in the constructor, you have to pass the ``id``
            explicitly.
        """
        raise ErrorMeta(type, msg, id=self.id if not id and hasattr(self, "id") else id)

    @abc.abstractmethod
    def compare(self) -> None:
        """Compares the inputs and raises an :class`ErrorMeta` in case they mismatch."""

    def extra_repr(self) -> Sequence[Union[str, Tuple[str, Any]]]:
        """Returns extra information that will be included in the representation.

        Should be overwritten by all subclasses that use additional options. The representation of the object will only
        be surfaced in case we encounter an unexpected error and thus should help debug the issue. Can be a sequence of
        key-value-pairs or attribute names.
        """
        return []

    def __repr__(self) -> str:
        head = f"{type(self).__name__}("
        tail = ")"
        body = [
            f" {name}={value!s},"
            for name, value in [
                ("id", self.id),
                ("actual", self.actual),
                ("expected", self.expected),
                *[
                    (extra, getattr(self, extra)) if isinstance(extra, str) else extra
                    for extra in self.extra_repr()
                ],
            ]
        ]
        # FIX: join `tail` directly. The previous `*tail` unpacked the string
        # into its characters and only produced the same output because `tail`
        # happens to be a single character.
        return "\n".join((head, *body, tail))
|
| 389 |
+
|
| 390 |
+
|
| 391 |
+
class ObjectPair(Pair):
    """Pair for any type of inputs that will be compared with the `==` operator.

    .. note::

        Since this will instantiate for any kind of inputs, it should only be used as fallback after all other pairs
        couldn't handle the inputs.

    """

    def compare(self) -> None:
        try:
            are_equal = self.actual == self.expected
        except Exception as error:
            # `self._fail` is bypassed on purpose: the original exception must be
            # chained onto the reported one.
            raise ErrorMeta(
                ValueError,
                f"{self.actual} == {self.expected} failed with:\n{error}.",
                id=self.id,
            ) from error

        if are_equal:
            return
        self._fail(AssertionError, f"{self.actual} != {self.expected}")
|
| 414 |
+
|
| 415 |
+
|
| 416 |
+
class NonePair(Pair):
    """Pair for ``None`` inputs."""

    def __init__(self, actual: Any, expected: Any, **other_parameters: Any) -> None:
        # This pair only applies when at least one side is None.
        if actual is not None and expected is not None:
            self._inputs_not_supported()

        super().__init__(actual, expected, **other_parameters)

    def compare(self) -> None:
        # Both sides must be None to match; one-sided None is a mismatch.
        if self.actual is None and self.expected is None:
            return
        self._fail(
            AssertionError, f"None mismatch: {self.actual} is not {self.expected}"
        )
|
| 430 |
+
|
| 431 |
+
|
| 432 |
+
class BooleanPair(Pair):
    """Pair for :class:`bool` inputs.

    .. note::

        If ``numpy`` is available, also handles :class:`numpy.bool_` inputs.

    """

    def __init__(
        self,
        actual: Any,
        expected: Any,
        *,
        id: Tuple[Any, ...],
        **other_parameters: Any,
    ) -> None:
        actual, expected = self._process_inputs(actual, expected, id=id)
        super().__init__(actual, expected, **other_parameters)

    @property
    def _supported_types(self) -> Tuple[Type, ...]:
        # numpy booleans are only supported when numpy itself is importable.
        cls: List[Type] = [bool]
        if NUMPY_AVAILABLE:
            cls.append(np.bool_)
        return tuple(cls)

    def _process_inputs(
        self, actual: Any, expected: Any, *, id: Tuple[Any, ...]
    ) -> Tuple[bool, bool]:
        """Normalizes both inputs to plain :class:`bool`, raising :class:`UnsupportedInputs` for other types."""
        self._check_inputs_isinstance(actual, expected, cls=self._supported_types)
        actual, expected = (
            self._to_bool(bool_like, id=id) for bool_like in (actual, expected)
        )
        return actual, expected

    def _to_bool(self, bool_like: Any, *, id: Tuple[Any, ...]) -> bool:
        if isinstance(bool_like, bool):
            return bool_like
        # FIX: guard the numpy branch with NUMPY_AVAILABLE (consistent with
        # NumberPair._to_number). Without the guard, referencing `np` here
        # raised a NameError instead of the intended ErrorMeta(TypeError)
        # whenever numpy is not installed.
        elif NUMPY_AVAILABLE and isinstance(bool_like, np.bool_):
            return bool_like.item()
        else:
            raise ErrorMeta(
                TypeError, f"Unknown boolean type {type(bool_like)}.", id=id
            )

    def compare(self) -> None:
        # `is` comparison is safe because _process_inputs normalized both sides
        # to the plain-bool singletons.
        if self.actual is not self.expected:
            self._fail(
                AssertionError,
                f"Booleans mismatch: {self.actual} is not {self.expected}",
            )
|
| 484 |
+
|
| 485 |
+
|
| 486 |
+
class NumberPair(Pair):
    """Pair for Python number (:class:`int`, :class:`float`, and :class:`complex`) inputs.

    .. note::

        If ``numpy`` is available, also handles :class:`numpy.number` inputs.

    Kwargs:
        rtol (Optional[float]): Relative tolerance. If specified ``atol`` must also be specified. If omitted, default
            values based on the type are selected with the below table.
        atol (Optional[float]): Absolute tolerance. If specified ``rtol`` must also be specified. If omitted, default
            values based on the type are selected with the below table.
        equal_nan (bool): If ``True``, two ``NaN`` values are considered equal. Defaults to ``False``.
        check_dtype (bool): If ``True``, the type of the inputs will be checked for equality. Defaults to ``False``.

    The following table displays correspondence between Python number type and the ``torch.dtype``'s. See
    :func:`assert_close` for the corresponding tolerances.

    +------------------+-------------------------------+
    | ``type``         | corresponding ``torch.dtype`` |
    +==================+===============================+
    | :class:`int`     | :attr:`~torch.int64`          |
    +------------------+-------------------------------+
    | :class:`float`   | :attr:`~torch.float64`        |
    +------------------+-------------------------------+
    | :class:`complex` | :attr:`~torch.complex128`     |
    +------------------+-------------------------------+
    """

    # Proxy dtypes used to look up the default tolerances for each Python number type.
    _TYPE_TO_DTYPE = {
        int: torch.int64,
        float: torch.float64,
        complex: torch.complex128,
    }
    _NUMBER_TYPES = tuple(_TYPE_TO_DTYPE.keys())

    def __init__(
        self,
        actual: Any,
        expected: Any,
        *,
        id: Tuple[Any, ...] = (),
        rtol: Optional[float] = None,
        atol: Optional[float] = None,
        equal_nan: bool = False,
        check_dtype: bool = False,
        **other_parameters: Any,
    ) -> None:
        actual, expected = self._process_inputs(actual, expected, id=id)
        super().__init__(actual, expected, id=id, **other_parameters)

        # Tolerances are resolved via the dtype proxies of the normalized inputs.
        self.rtol, self.atol = get_tolerances(
            self._TYPE_TO_DTYPE[type(actual)],
            self._TYPE_TO_DTYPE[type(expected)],
            rtol=rtol,
            atol=atol,
            id=id,
        )
        self.equal_nan = equal_nan
        self.check_dtype = check_dtype

    @property
    def _supported_types(self) -> Tuple[Type, ...]:
        supported = list(self._NUMBER_TYPES)
        if NUMPY_AVAILABLE:
            supported.append(np.number)
        return tuple(supported)

    def _process_inputs(
        self, actual: Any, expected: Any, *, id: Tuple[Any, ...]
    ) -> Tuple[Union[int, float, complex], Union[int, float, complex]]:
        """Normalizes both inputs to plain Python numbers, raising :class:`UnsupportedInputs` for other types."""
        self._check_inputs_isinstance(actual, expected, cls=self._supported_types)
        return self._to_number(actual, id=id), self._to_number(expected, id=id)

    def _to_number(
        self, number_like: Any, *, id: Tuple[Any, ...]
    ) -> Union[int, float, complex]:
        if NUMPY_AVAILABLE and isinstance(number_like, np.number):
            # Unwrap numpy scalars to the corresponding Python number.
            return number_like.item()
        if isinstance(number_like, self._NUMBER_TYPES):
            return number_like  # type: ignore[return-value]
        raise ErrorMeta(
            TypeError, f"Unknown number type {type(number_like)}.", id=id
        )

    def compare(self) -> None:
        """Compares the two numbers, honoring ``check_dtype``, ``equal_nan``, and the tolerances."""
        actual, expected = self.actual, self.expected

        if self.check_dtype and type(actual) is not type(expected):
            self._fail(
                AssertionError,
                f"The (d)types do not match: {type(actual)} != {type(expected)}.",
            )

        if actual == expected:
            return

        if self.equal_nan and cmath.isnan(actual) and cmath.isnan(expected):
            return

        abs_diff = abs(actual - expected)
        tolerance = self.atol + self.rtol * abs(expected)
        if cmath.isfinite(abs_diff) and abs_diff <= tolerance:
            return

        self._fail(
            AssertionError,
            make_scalar_mismatch_msg(actual, expected, rtol=self.rtol, atol=self.atol),
        )

    def extra_repr(self) -> Sequence[str]:
        return ("rtol", "atol", "equal_nan", "check_dtype")
|
| 607 |
+
|
| 608 |
+
|
| 609 |
+
class TensorLikePair(Pair):
|
| 610 |
+
"""Pair for :class:`torch.Tensor`-like inputs.
|
| 611 |
+
|
| 612 |
+
Kwargs:
|
| 613 |
+
allow_subclasses (bool):
|
| 614 |
+
rtol (Optional[float]): Relative tolerance. If specified ``atol`` must also be specified. If omitted, default
|
| 615 |
+
values based on the type are selected. See :func:assert_close: for details.
|
| 616 |
+
atol (Optional[float]): Absolute tolerance. If specified ``rtol`` must also be specified. If omitted, default
|
| 617 |
+
values based on the type are selected. See :func:assert_close: for details.
|
| 618 |
+
equal_nan (bool): If ``True``, two ``NaN`` values are considered equal. Defaults to ``False``.
|
| 619 |
+
check_device (bool): If ``True`` (default), asserts that corresponding tensors are on the same
|
| 620 |
+
:attr:`~torch.Tensor.device`. If this check is disabled, tensors on different
|
| 621 |
+
:attr:`~torch.Tensor.device`'s are moved to the CPU before being compared.
|
| 622 |
+
check_dtype (bool): If ``True`` (default), asserts that corresponding tensors have the same ``dtype``. If this
|
| 623 |
+
check is disabled, tensors with different ``dtype``'s are promoted to a common ``dtype`` (according to
|
| 624 |
+
:func:`torch.promote_types`) before being compared.
|
| 625 |
+
check_layout (bool): If ``True`` (default), asserts that corresponding tensors have the same ``layout``. If this
|
| 626 |
+
check is disabled, tensors with different ``layout``'s are converted to strided tensors before being
|
| 627 |
+
compared.
|
| 628 |
+
check_stride (bool): If ``True`` and corresponding tensors are strided, asserts that they have the same stride.
|
| 629 |
+
"""
|
| 630 |
+
|
| 631 |
+
def __init__(
|
| 632 |
+
self,
|
| 633 |
+
actual: Any,
|
| 634 |
+
expected: Any,
|
| 635 |
+
*,
|
| 636 |
+
id: Tuple[Any, ...] = (),
|
| 637 |
+
allow_subclasses: bool = True,
|
| 638 |
+
rtol: Optional[float] = None,
|
| 639 |
+
atol: Optional[float] = None,
|
| 640 |
+
equal_nan: bool = False,
|
| 641 |
+
check_device: bool = True,
|
| 642 |
+
check_dtype: bool = True,
|
| 643 |
+
check_layout: bool = True,
|
| 644 |
+
check_stride: bool = False,
|
| 645 |
+
**other_parameters: Any,
|
| 646 |
+
):
|
| 647 |
+
actual, expected = self._process_inputs(
|
| 648 |
+
actual, expected, id=id, allow_subclasses=allow_subclasses
|
| 649 |
+
)
|
| 650 |
+
super().__init__(actual, expected, id=id, **other_parameters)
|
| 651 |
+
|
| 652 |
+
self.rtol, self.atol = get_tolerances(
|
| 653 |
+
actual, expected, rtol=rtol, atol=atol, id=self.id
|
| 654 |
+
)
|
| 655 |
+
self.equal_nan = equal_nan
|
| 656 |
+
self.check_device = check_device
|
| 657 |
+
self.check_dtype = check_dtype
|
| 658 |
+
self.check_layout = check_layout
|
| 659 |
+
self.check_stride = check_stride
|
| 660 |
+
|
| 661 |
+
def _process_inputs(
|
| 662 |
+
self, actual: Any, expected: Any, *, id: Tuple[Any, ...], allow_subclasses: bool
|
| 663 |
+
) -> Tuple[torch.Tensor, torch.Tensor]:
|
| 664 |
+
directly_related = isinstance(actual, type(expected)) or isinstance(
|
| 665 |
+
expected, type(actual)
|
| 666 |
+
)
|
| 667 |
+
if not directly_related:
|
| 668 |
+
self._inputs_not_supported()
|
| 669 |
+
|
| 670 |
+
if not allow_subclasses and type(actual) is not type(expected):
|
| 671 |
+
self._inputs_not_supported()
|
| 672 |
+
|
| 673 |
+
actual, expected = (self._to_tensor(input) for input in (actual, expected))
|
| 674 |
+
for tensor in (actual, expected):
|
| 675 |
+
self._check_supported(tensor, id=id)
|
| 676 |
+
return actual, expected
|
| 677 |
+
|
| 678 |
+
def _to_tensor(self, tensor_like: Any) -> torch.Tensor:
|
| 679 |
+
if isinstance(tensor_like, torch.Tensor):
|
| 680 |
+
return tensor_like
|
| 681 |
+
|
| 682 |
+
try:
|
| 683 |
+
return torch.as_tensor(tensor_like)
|
| 684 |
+
except Exception:
|
| 685 |
+
self._inputs_not_supported()
|
| 686 |
+
|
| 687 |
+
def _check_supported(self, tensor: torch.Tensor, *, id: Tuple[Any, ...]) -> None:
|
| 688 |
+
if tensor.layout not in {
|
| 689 |
+
torch.strided,
|
| 690 |
+
torch.sparse_coo,
|
| 691 |
+
torch.sparse_csr,
|
| 692 |
+
torch.sparse_csc,
|
| 693 |
+
torch.sparse_bsr,
|
| 694 |
+
torch.sparse_bsc,
|
| 695 |
+
}:
|
| 696 |
+
raise ErrorMeta(
|
| 697 |
+
ValueError, f"Unsupported tensor layout {tensor.layout}", id=id
|
| 698 |
+
)
|
| 699 |
+
|
| 700 |
+
def compare(self) -> None:
|
| 701 |
+
actual, expected = self.actual, self.expected
|
| 702 |
+
|
| 703 |
+
self._compare_attributes(actual, expected)
|
| 704 |
+
if any(input.device.type == "meta" for input in (actual, expected)):
|
| 705 |
+
return
|
| 706 |
+
|
| 707 |
+
actual, expected = self._equalize_attributes(actual, expected)
|
| 708 |
+
self._compare_values(actual, expected)
|
| 709 |
+
|
| 710 |
+
def _compare_attributes(
|
| 711 |
+
self,
|
| 712 |
+
actual: torch.Tensor,
|
| 713 |
+
expected: torch.Tensor,
|
| 714 |
+
) -> None:
|
| 715 |
+
"""Checks if the attributes of two tensors match.
|
| 716 |
+
|
| 717 |
+
Always checks
|
| 718 |
+
|
| 719 |
+
- the :attr:`~torch.Tensor.shape`,
|
| 720 |
+
- whether both inputs are quantized or not,
|
| 721 |
+
- and if they use the same quantization scheme.
|
| 722 |
+
|
| 723 |
+
Checks for
|
| 724 |
+
|
| 725 |
+
- :attr:`~torch.Tensor.layout`,
|
| 726 |
+
- :meth:`~torch.Tensor.stride`,
|
| 727 |
+
- :attr:`~torch.Tensor.device`, and
|
| 728 |
+
- :attr:`~torch.Tensor.dtype`
|
| 729 |
+
|
| 730 |
+
are optional and can be disabled through the corresponding ``check_*`` flag during construction of the pair.
|
| 731 |
+
"""
|
| 732 |
+
|
| 733 |
+
def raise_mismatch_error(
|
| 734 |
+
attribute_name: str, actual_value: Any, expected_value: Any
|
| 735 |
+
) -> NoReturn:
|
| 736 |
+
self._fail(
|
| 737 |
+
AssertionError,
|
| 738 |
+
f"The values for attribute '{attribute_name}' do not match: {actual_value} != {expected_value}.",
|
| 739 |
+
)
|
| 740 |
+
|
| 741 |
+
if actual.shape != expected.shape:
|
| 742 |
+
raise_mismatch_error("shape", actual.shape, expected.shape)
|
| 743 |
+
|
| 744 |
+
if actual.is_quantized != expected.is_quantized:
|
| 745 |
+
raise_mismatch_error(
|
| 746 |
+
"is_quantized", actual.is_quantized, expected.is_quantized
|
| 747 |
+
)
|
| 748 |
+
elif actual.is_quantized and actual.qscheme() != expected.qscheme():
|
| 749 |
+
raise_mismatch_error("qscheme()", actual.qscheme(), expected.qscheme())
|
| 750 |
+
|
| 751 |
+
if actual.layout != expected.layout:
|
| 752 |
+
if self.check_layout:
|
| 753 |
+
raise_mismatch_error("layout", actual.layout, expected.layout)
|
| 754 |
+
elif (
|
| 755 |
+
actual.layout == torch.strided
|
| 756 |
+
and self.check_stride
|
| 757 |
+
and actual.stride() != expected.stride()
|
| 758 |
+
):
|
| 759 |
+
raise_mismatch_error("stride()", actual.stride(), expected.stride())
|
| 760 |
+
|
| 761 |
+
if self.check_device and actual.device != expected.device:
|
| 762 |
+
raise_mismatch_error("device", actual.device, expected.device)
|
| 763 |
+
|
| 764 |
+
if self.check_dtype and actual.dtype != expected.dtype:
|
| 765 |
+
raise_mismatch_error("dtype", actual.dtype, expected.dtype)
|
| 766 |
+
|
| 767 |
+
def _equalize_attributes(
|
| 768 |
+
self, actual: torch.Tensor, expected: torch.Tensor
|
| 769 |
+
) -> Tuple[torch.Tensor, torch.Tensor]:
|
| 770 |
+
"""Equalizes some attributes of two tensors for value comparison.
|
| 771 |
+
|
| 772 |
+
If ``actual`` and ``expected`` are ...
|
| 773 |
+
|
| 774 |
+
- ... not on the same :attr:`~torch.Tensor.device`, they are moved CPU memory.
|
| 775 |
+
- ... not of the same ``dtype``, they are promoted to a common ``dtype`` (according to
|
| 776 |
+
:func:`torch.promote_types`).
|
| 777 |
+
- ... not of the same ``layout``, they are converted to strided tensors.
|
| 778 |
+
|
| 779 |
+
Args:
|
| 780 |
+
actual (Tensor): Actual tensor.
|
| 781 |
+
expected (Tensor): Expected tensor.
|
| 782 |
+
|
| 783 |
+
Returns:
|
| 784 |
+
(Tuple[Tensor, Tensor]): Equalized tensors.
|
| 785 |
+
"""
|
| 786 |
+
# The comparison logic uses operators currently not supported by the MPS backends.
|
| 787 |
+
# See https://github.com/pytorch/pytorch/issues/77144 for details.
|
| 788 |
+
# TODO: Remove this conversion as soon as all operations are supported natively by the MPS backend
|
| 789 |
+
if actual.is_mps or expected.is_mps: # type: ignore[attr-defined]
|
| 790 |
+
actual = actual.cpu()
|
| 791 |
+
expected = expected.cpu()
|
| 792 |
+
|
| 793 |
+
if actual.device != expected.device:
|
| 794 |
+
actual = actual.cpu()
|
| 795 |
+
expected = expected.cpu()
|
| 796 |
+
|
| 797 |
+
if actual.dtype != expected.dtype:
|
| 798 |
+
actual_dtype = actual.dtype
|
| 799 |
+
expected_dtype = expected.dtype
|
| 800 |
+
# For uint64, this is not sound in general, which is why promote_types doesn't
|
| 801 |
+
# allow it, but for easy testing, we're unlikely to get confused
|
| 802 |
+
# by large uint64 overflowing into negative int64
|
| 803 |
+
if actual_dtype in [torch.uint64, torch.uint32, torch.uint16]:
|
| 804 |
+
actual_dtype = torch.int64
|
| 805 |
+
if expected_dtype in [torch.uint64, torch.uint32, torch.uint16]:
|
| 806 |
+
expected_dtype = torch.int64
|
| 807 |
+
dtype = torch.promote_types(actual_dtype, expected_dtype)
|
| 808 |
+
actual = actual.to(dtype)
|
| 809 |
+
expected = expected.to(dtype)
|
| 810 |
+
|
| 811 |
+
if actual.layout != expected.layout:
|
| 812 |
+
# These checks are needed, since Tensor.to_dense() fails on tensors that are already strided
|
| 813 |
+
actual = actual.to_dense() if actual.layout != torch.strided else actual
|
| 814 |
+
expected = (
|
| 815 |
+
expected.to_dense() if expected.layout != torch.strided else expected
|
| 816 |
+
)
|
| 817 |
+
|
| 818 |
+
return actual, expected
|
| 819 |
+
|
| 820 |
+
def _compare_values(self, actual: torch.Tensor, expected: torch.Tensor) -> None:
|
| 821 |
+
if actual.is_quantized:
|
| 822 |
+
compare_fn = self._compare_quantized_values
|
| 823 |
+
elif actual.is_sparse:
|
| 824 |
+
compare_fn = self._compare_sparse_coo_values
|
| 825 |
+
elif actual.layout in {
|
| 826 |
+
torch.sparse_csr,
|
| 827 |
+
torch.sparse_csc,
|
| 828 |
+
torch.sparse_bsr,
|
| 829 |
+
torch.sparse_bsc,
|
| 830 |
+
}:
|
| 831 |
+
compare_fn = self._compare_sparse_compressed_values
|
| 832 |
+
else:
|
| 833 |
+
compare_fn = self._compare_regular_values_close
|
| 834 |
+
|
| 835 |
+
compare_fn(
|
| 836 |
+
actual, expected, rtol=self.rtol, atol=self.atol, equal_nan=self.equal_nan
|
| 837 |
+
)
|
| 838 |
+
|
| 839 |
+
def _compare_quantized_values(
|
| 840 |
+
self,
|
| 841 |
+
actual: torch.Tensor,
|
| 842 |
+
expected: torch.Tensor,
|
| 843 |
+
*,
|
| 844 |
+
rtol: float,
|
| 845 |
+
atol: float,
|
| 846 |
+
equal_nan: bool,
|
| 847 |
+
) -> None:
|
| 848 |
+
"""Compares quantized tensors by comparing the :meth:`~torch.Tensor.dequantize`'d variants for closeness.
|
| 849 |
+
|
| 850 |
+
.. note::
|
| 851 |
+
|
| 852 |
+
A detailed discussion about why only the dequantized variant is checked for closeness rather than checking
|
| 853 |
+
the individual quantization parameters for closeness and the integer representation for equality can be
|
| 854 |
+
found in https://github.com/pytorch/pytorch/issues/68548.
|
| 855 |
+
"""
|
| 856 |
+
return self._compare_regular_values_close(
|
| 857 |
+
actual.dequantize(),
|
| 858 |
+
expected.dequantize(),
|
| 859 |
+
rtol=rtol,
|
| 860 |
+
atol=atol,
|
| 861 |
+
equal_nan=equal_nan,
|
| 862 |
+
identifier=lambda default_identifier: f"Quantized {default_identifier.lower()}",
|
| 863 |
+
)
|
| 864 |
+
|
| 865 |
+
def _compare_sparse_coo_values(
|
| 866 |
+
self,
|
| 867 |
+
actual: torch.Tensor,
|
| 868 |
+
expected: torch.Tensor,
|
| 869 |
+
*,
|
| 870 |
+
rtol: float,
|
| 871 |
+
atol: float,
|
| 872 |
+
equal_nan: bool,
|
| 873 |
+
) -> None:
|
| 874 |
+
"""Compares sparse COO tensors by comparing
|
| 875 |
+
|
| 876 |
+
- the number of sparse dimensions,
|
| 877 |
+
- the number of non-zero elements (nnz) for equality,
|
| 878 |
+
- the indices for equality, and
|
| 879 |
+
- the values for closeness.
|
| 880 |
+
"""
|
| 881 |
+
if actual.sparse_dim() != expected.sparse_dim():
|
| 882 |
+
self._fail(
|
| 883 |
+
AssertionError,
|
| 884 |
+
(
|
| 885 |
+
f"The number of sparse dimensions in sparse COO tensors does not match: "
|
| 886 |
+
f"{actual.sparse_dim()} != {expected.sparse_dim()}"
|
| 887 |
+
),
|
| 888 |
+
)
|
| 889 |
+
|
| 890 |
+
if actual._nnz() != expected._nnz():
|
| 891 |
+
self._fail(
|
| 892 |
+
AssertionError,
|
| 893 |
+
(
|
| 894 |
+
f"The number of specified values in sparse COO tensors does not match: "
|
| 895 |
+
f"{actual._nnz()} != {expected._nnz()}"
|
| 896 |
+
),
|
| 897 |
+
)
|
| 898 |
+
|
| 899 |
+
self._compare_regular_values_equal(
|
| 900 |
+
actual._indices(),
|
| 901 |
+
expected._indices(),
|
| 902 |
+
identifier="Sparse COO indices",
|
| 903 |
+
)
|
| 904 |
+
self._compare_regular_values_close(
|
| 905 |
+
actual._values(),
|
| 906 |
+
expected._values(),
|
| 907 |
+
rtol=rtol,
|
| 908 |
+
atol=atol,
|
| 909 |
+
equal_nan=equal_nan,
|
| 910 |
+
identifier="Sparse COO values",
|
| 911 |
+
)
|
| 912 |
+
|
| 913 |
+
    def _compare_sparse_compressed_values(
        self,
        actual: torch.Tensor,
        expected: torch.Tensor,
        *,
        rtol: float,
        atol: float,
        equal_nan: bool,
    ) -> None:
        """Compares sparse compressed tensors by comparing

        - the number of non-zero elements (nnz) for equality,
        - the plain indices for equality,
        - the compressed indices for equality, and
        - the values for closeness.
        """
        # Map the layout of `actual` to a human-readable format name plus the unbound accessor
        # methods for its compressed and plain indices. CSR/BSR compress rows, CSC/BSC columns.
        format_name, compressed_indices_method, plain_indices_method = {
            torch.sparse_csr: (
                "CSR",
                torch.Tensor.crow_indices,
                torch.Tensor.col_indices,
            ),
            torch.sparse_csc: (
                "CSC",
                torch.Tensor.ccol_indices,
                torch.Tensor.row_indices,
            ),
            torch.sparse_bsr: (
                "BSR",
                torch.Tensor.crow_indices,
                torch.Tensor.col_indices,
            ),
            torch.sparse_bsc: (
                "BSC",
                torch.Tensor.ccol_indices,
                torch.Tensor.row_indices,
            ),
        }[actual.layout]

        if actual._nnz() != expected._nnz():
            self._fail(
                AssertionError,
                (
                    f"The number of specified values in sparse {format_name} tensors does not match: "
                    f"{actual._nnz()} != {expected._nnz()}"
                ),
            )

        # Compressed and plain indices in the CSR / CSC / BSR / BSC sparse formats can be `torch.int32` _or_
        # `torch.int64`. While the same dtype is enforced for the compressed and plain indices of a single tensor, it
        # can be different between two tensors. Thus, we need to convert them to the same dtype, or the comparison will
        # fail.
        actual_compressed_indices = compressed_indices_method(actual)
        expected_compressed_indices = compressed_indices_method(expected)
        indices_dtype = torch.promote_types(
            actual_compressed_indices.dtype, expected_compressed_indices.dtype
        )

        # Indices identify positions, so they are compared for equality; only the values use the
        # tolerances. `__name__` of the accessor gives e.g. "crow_indices" for error messages.
        self._compare_regular_values_equal(
            actual_compressed_indices.to(indices_dtype),
            expected_compressed_indices.to(indices_dtype),
            identifier=f"Sparse {format_name} {compressed_indices_method.__name__}",
        )
        self._compare_regular_values_equal(
            plain_indices_method(actual).to(indices_dtype),
            plain_indices_method(expected).to(indices_dtype),
            identifier=f"Sparse {format_name} {plain_indices_method.__name__}",
        )
        self._compare_regular_values_close(
            actual.values(),
            expected.values(),
            rtol=rtol,
            atol=atol,
            equal_nan=equal_nan,
            identifier=f"Sparse {format_name} values",
        )
|
| 989 |
+
|
| 990 |
+
def _compare_regular_values_equal(
|
| 991 |
+
self,
|
| 992 |
+
actual: torch.Tensor,
|
| 993 |
+
expected: torch.Tensor,
|
| 994 |
+
*,
|
| 995 |
+
equal_nan: bool = False,
|
| 996 |
+
identifier: Optional[Union[str, Callable[[str], str]]] = None,
|
| 997 |
+
) -> None:
|
| 998 |
+
"""Checks if the values of two tensors are equal."""
|
| 999 |
+
self._compare_regular_values_close(
|
| 1000 |
+
actual, expected, rtol=0, atol=0, equal_nan=equal_nan, identifier=identifier
|
| 1001 |
+
)
|
| 1002 |
+
|
| 1003 |
+
def _compare_regular_values_close(
|
| 1004 |
+
self,
|
| 1005 |
+
actual: torch.Tensor,
|
| 1006 |
+
expected: torch.Tensor,
|
| 1007 |
+
*,
|
| 1008 |
+
rtol: float,
|
| 1009 |
+
atol: float,
|
| 1010 |
+
equal_nan: bool,
|
| 1011 |
+
identifier: Optional[Union[str, Callable[[str], str]]] = None,
|
| 1012 |
+
) -> None:
|
| 1013 |
+
"""Checks if the values of two tensors are close up to a desired tolerance."""
|
| 1014 |
+
matches = torch.isclose(
|
| 1015 |
+
actual, expected, rtol=rtol, atol=atol, equal_nan=equal_nan
|
| 1016 |
+
)
|
| 1017 |
+
if torch.all(matches):
|
| 1018 |
+
return
|
| 1019 |
+
|
| 1020 |
+
if actual.shape == torch.Size([]):
|
| 1021 |
+
msg = make_scalar_mismatch_msg(
|
| 1022 |
+
actual.item(),
|
| 1023 |
+
expected.item(),
|
| 1024 |
+
rtol=rtol,
|
| 1025 |
+
atol=atol,
|
| 1026 |
+
identifier=identifier,
|
| 1027 |
+
)
|
| 1028 |
+
else:
|
| 1029 |
+
msg = make_tensor_mismatch_msg(
|
| 1030 |
+
actual, expected, matches, rtol=rtol, atol=atol, identifier=identifier
|
| 1031 |
+
)
|
| 1032 |
+
self._fail(AssertionError, msg)
|
| 1033 |
+
|
| 1034 |
+
def extra_repr(self) -> Sequence[str]:
|
| 1035 |
+
return (
|
| 1036 |
+
"rtol",
|
| 1037 |
+
"atol",
|
| 1038 |
+
"equal_nan",
|
| 1039 |
+
"check_device",
|
| 1040 |
+
"check_dtype",
|
| 1041 |
+
"check_layout",
|
| 1042 |
+
"check_stride",
|
| 1043 |
+
)
|
| 1044 |
+
|
| 1045 |
+
|
| 1046 |
+
def originate_pairs(
    actual: Any,
    expected: Any,
    *,
    pair_types: Sequence[Type[Pair]],
    sequence_types: Tuple[Type, ...] = (collections.abc.Sequence,),
    mapping_types: Tuple[Type, ...] = (collections.abc.Mapping,),
    id: Tuple[Any, ...] = (),
    **options: Any,
) -> List[Pair]:
    """Originates pairs from the individual inputs.

    ``actual`` and ``expected`` can be possibly nested :class:`~collections.abc.Sequence`'s or
    :class:`~collections.abc.Mapping`'s. In this case the pairs are originated by recursing through them.

    Args:
        actual (Any): Actual input.
        expected (Any): Expected input.
        pair_types (Sequence[Type[Pair]]): Sequence of pair types that will be tried to construct with the inputs.
            First successful pair will be used.
        sequence_types (Tuple[Type, ...]): Optional types treated as sequences that will be checked elementwise.
        mapping_types (Tuple[Type, ...]): Optional types treated as mappings that will be checked elementwise.
        id (Tuple[Any, ...]): Optional id of a pair that will be included in an error message.
        **options (Any): Options passed to each pair during construction.

    Raises:
        ErrorMeta: With :class:`AssertionError`, if the inputs are :class:`~collections.abc.Sequence`'s, but their
            length does not match.
        ErrorMeta: With :class:`AssertionError`, if the inputs are :class:`~collections.abc.Mapping`'s, but their set of
            keys do not match.
        ErrorMeta: With :class:`TypeError`, if no pair is able to handle the inputs.
        ErrorMeta: With any expected exception that happens during the construction of a pair.

    Returns:
        (List[Pair]): Originated pairs.
    """
    # We explicitly exclude str's here since they are self-referential and would cause an infinite recursion loop:
    # "a" == "a"[0][0]...
    if (
        isinstance(actual, sequence_types)
        and not isinstance(actual, str)
        and isinstance(expected, sequence_types)
        and not isinstance(expected, str)
    ):
        actual_len = len(actual)
        expected_len = len(expected)
        if actual_len != expected_len:
            raise ErrorMeta(
                AssertionError,
                f"The length of the sequences mismatch: {actual_len} != {expected_len}",
                id=id,
            )

        # Recurse into the elements, extending `id` with the index so error messages can
        # point to the exact offending item, e.g. `[1][2]`.
        pairs: List[Pair] = []
        for idx in range(actual_len):
            pairs.extend(
                originate_pairs(
                    actual[idx],
                    expected[idx],
                    pair_types=pair_types,
                    sequence_types=sequence_types,
                    mapping_types=mapping_types,
                    id=(*id, idx),
                    **options,
                )
            )
        return pairs

    elif isinstance(actual, mapping_types) and isinstance(expected, mapping_types):
        actual_keys = set(actual.keys())
        expected_keys = set(expected.keys())
        if actual_keys != expected_keys:
            missing_keys = expected_keys - actual_keys
            additional_keys = actual_keys - expected_keys
            raise ErrorMeta(
                AssertionError,
                (
                    f"The keys of the mappings do not match:\n"
                    f"Missing keys in the actual mapping: {sorted(missing_keys)}\n"
                    f"Additional keys in the actual mapping: {sorted(additional_keys)}"
                ),
                id=id,
            )

        keys: Collection = actual_keys
        # Since the origination aborts after the first failure, we try to be deterministic
        # here. Sorting can fail for keys of mixed types, in which case we simply keep the
        # (arbitrary) set order.
        with contextlib.suppress(Exception):
            keys = sorted(keys)

        pairs = []
        for key in keys:
            pairs.extend(
                originate_pairs(
                    actual[key],
                    expected[key],
                    pair_types=pair_types,
                    sequence_types=sequence_types,
                    mapping_types=mapping_types,
                    id=(*id, key),
                    **options,
                )
            )
        return pairs

    else:
        # Leaf case: try each pair type in order; the first one that accepts the inputs wins.
        for pair_type in pair_types:
            try:
                return [pair_type(actual, expected, id=id, **options)]
            # Raising an `UnsupportedInputs` during origination indicates that the pair type is not able to handle the
            # inputs. Thus, we try the next pair type.
            except UnsupportedInputs:
                continue
            # Raising an `ErrorMeta` during origination is the orderly way to abort and so we simply re-raise it. This
            # is only in a separate branch, because the one below would also except it.
            except ErrorMeta:
                raise
            # Raising any other exception during origination is unexpected and will give some extra information about
            # what happened. If applicable, the exception should be expected in the future.
            except Exception as error:
                raise RuntimeError(
                    f"Originating a {pair_type.__name__}() at item {''.join(str([item]) for item in id)} with\n\n"
                    f"{type(actual).__name__}(): {actual}\n\n"
                    f"and\n\n"
                    f"{type(expected).__name__}(): {expected}\n\n"
                    f"resulted in the unexpected exception above. "
                    f"If you are a user and see this message during normal operation "
                    "please file an issue at https://github.com/pytorch/pytorch/issues. "
                    "If you are a developer and working on the comparison functions, "
                    "please except the previous error and raise an expressive `ErrorMeta` instead."
                ) from error
        else:
            raise ErrorMeta(
                TypeError,
                f"No comparison pair was able to handle inputs of type {type(actual)} and {type(expected)}.",
                id=id,
            )
|
| 1182 |
+
|
| 1183 |
+
|
| 1184 |
+
def not_close_error_metas(
    actual: Any,
    expected: Any,
    *,
    pair_types: Sequence[Type[Pair]] = (ObjectPair,),
    sequence_types: Tuple[Type, ...] = (collections.abc.Sequence,),
    mapping_types: Tuple[Type, ...] = (collections.abc.Mapping,),
    **options: Any,
) -> List[ErrorMeta]:
    """Compares the inputs and returns the :class:`ErrorMeta`'s of all pairs that failed the comparison.

    ``actual`` and ``expected`` can be possibly nested :class:`~collections.abc.Sequence`'s or
    :class:`~collections.abc.Mapping`'s. In this case the comparison happens elementwise by recursing through them.

    Args:
        actual (Any): Actual input.
        expected (Any): Expected input.
        pair_types (Sequence[Type[Pair]]): Sequence of :class:`Pair` types that will be tried to construct with the
            inputs. First successful pair will be used. Defaults to only using :class:`ObjectPair`.
        sequence_types (Tuple[Type, ...]): Optional types treated as sequences that will be checked elementwise.
        mapping_types (Tuple[Type, ...]): Optional types treated as mappings that will be checked elementwise.
        **options (Any): Options passed to each pair during construction.

    Returns:
        (List[ErrorMeta]): Error metas collected from every pair whose comparison failed; empty if all pairs match.
    """
    # Hide this function from `pytest`'s traceback
    __tracebackhide__ = True

    try:
        pairs = originate_pairs(
            actual,
            expected,
            pair_types=pair_types,
            sequence_types=sequence_types,
            mapping_types=mapping_types,
            **options,
        )
    except ErrorMeta as error_meta:
        # Explicitly raising from None to hide the internal traceback
        raise error_meta.to_error() from None  # noqa: RSE102

    # Collect the failures of all pairs rather than aborting at the first one.
    error_metas: List[ErrorMeta] = []
    for pair in pairs:
        try:
            pair.compare()
        except ErrorMeta as error_meta:
            error_metas.append(error_meta)
        # Raising any exception besides `ErrorMeta` while comparing is unexpected and will give some extra information
        # about what happened. If applicable, the exception should be expected in the future.
        except Exception as error:
            raise RuntimeError(
                f"Comparing\n\n"
                f"{pair}\n\n"
                f"resulted in the unexpected exception above. "
                f"If you are a user and see this message during normal operation "
                "please file an issue at https://github.com/pytorch/pytorch/issues. "
                "If you are a developer and working on the comparison functions, "
                "please except the previous error and raise an expressive `ErrorMeta` instead."
            ) from error

    # [ErrorMeta Cycles]
    # ErrorMeta objects in this list capture
    # tracebacks that refer to the frame of this function.
    # The local variable `error_metas` refers to the error meta
    # objects, creating a reference cycle. Frames in the traceback
    # would not get freed until cycle collection, leaking cuda memory in tests.
    # We break the cycle by removing the reference to the error_meta objects
    # from this frame as it returns.
    error_metas = [error_metas]
    return error_metas.pop()
|
| 1252 |
+
|
| 1253 |
+
|
| 1254 |
+
def assert_close(
|
| 1255 |
+
actual: Any,
|
| 1256 |
+
expected: Any,
|
| 1257 |
+
*,
|
| 1258 |
+
allow_subclasses: bool = True,
|
| 1259 |
+
rtol: Optional[float] = None,
|
| 1260 |
+
atol: Optional[float] = None,
|
| 1261 |
+
equal_nan: bool = False,
|
| 1262 |
+
check_device: bool = True,
|
| 1263 |
+
check_dtype: bool = True,
|
| 1264 |
+
check_layout: bool = True,
|
| 1265 |
+
check_stride: bool = False,
|
| 1266 |
+
msg: Optional[Union[str, Callable[[str], str]]] = None,
|
| 1267 |
+
):
|
| 1268 |
+
r"""Asserts that ``actual`` and ``expected`` are close.
|
| 1269 |
+
|
| 1270 |
+
If ``actual`` and ``expected`` are strided, non-quantized, real-valued, and finite, they are considered close if
|
| 1271 |
+
|
| 1272 |
+
.. math::
|
| 1273 |
+
|
| 1274 |
+
\lvert \text{actual} - \text{expected} \rvert \le \texttt{atol} + \texttt{rtol} \cdot \lvert \text{expected} \rvert
|
| 1275 |
+
|
| 1276 |
+
Non-finite values (``-inf`` and ``inf``) are only considered close if and only if they are equal. ``NaN``'s are
|
| 1277 |
+
only considered equal to each other if ``equal_nan`` is ``True``.
|
| 1278 |
+
|
| 1279 |
+
In addition, they are only considered close if they have the same
|
| 1280 |
+
|
| 1281 |
+
- :attr:`~torch.Tensor.device` (if ``check_device`` is ``True``),
|
| 1282 |
+
- ``dtype`` (if ``check_dtype`` is ``True``),
|
| 1283 |
+
- ``layout`` (if ``check_layout`` is ``True``), and
|
| 1284 |
+
- stride (if ``check_stride`` is ``True``).
|
| 1285 |
+
|
| 1286 |
+
If either ``actual`` or ``expected`` is a meta tensor, only the attribute checks will be performed.
|
| 1287 |
+
|
| 1288 |
+
If ``actual`` and ``expected`` are sparse (either having COO, CSR, CSC, BSR, or BSC layout), their strided members are
|
| 1289 |
+
checked individually. Indices, namely ``indices`` for COO, ``crow_indices`` and ``col_indices`` for CSR and BSR,
|
| 1290 |
+
or ``ccol_indices`` and ``row_indices`` for CSC and BSC layouts, respectively,
|
| 1291 |
+
are always checked for equality whereas the values are checked for closeness according to the definition above.
|
| 1292 |
+
|
| 1293 |
+
If ``actual`` and ``expected`` are quantized, they are considered close if they have the same
|
| 1294 |
+
:meth:`~torch.Tensor.qscheme` and the result of :meth:`~torch.Tensor.dequantize` is close according to the
|
| 1295 |
+
definition above.
|
| 1296 |
+
|
| 1297 |
+
``actual`` and ``expected`` can be :class:`~torch.Tensor`'s or any tensor-or-scalar-likes from which
|
| 1298 |
+
:class:`torch.Tensor`'s can be constructed with :func:`torch.as_tensor`. Except for Python scalars the input types
|
| 1299 |
+
have to be directly related. In addition, ``actual`` and ``expected`` can be :class:`~collections.abc.Sequence`'s
|
| 1300 |
+
or :class:`~collections.abc.Mapping`'s in which case they are considered close if their structure matches and all
|
| 1301 |
+
their elements are considered close according to the above definition.
|
| 1302 |
+
|
| 1303 |
+
.. note::
|
| 1304 |
+
|
| 1305 |
+
Python scalars are an exception to the type relation requirement, because their :func:`type`, i.e.
|
| 1306 |
+
:class:`int`, :class:`float`, and :class:`complex`, is equivalent to the ``dtype`` of a tensor-like. Thus,
|
| 1307 |
+
Python scalars of different types can be checked, but require ``check_dtype=False``.
|
| 1308 |
+
|
| 1309 |
+
Args:
|
| 1310 |
+
actual (Any): Actual input.
|
| 1311 |
+
expected (Any): Expected input.
|
| 1312 |
+
allow_subclasses (bool): If ``True`` (default) and except for Python scalars, inputs of directly related types
|
| 1313 |
+
are allowed. Otherwise type equality is required.
|
| 1314 |
+
rtol (Optional[float]): Relative tolerance. If specified ``atol`` must also be specified. If omitted, default
|
| 1315 |
+
values based on the :attr:`~torch.Tensor.dtype` are selected with the below table.
|
| 1316 |
+
atol (Optional[float]): Absolute tolerance. If specified ``rtol`` must also be specified. If omitted, default
|
| 1317 |
+
values based on the :attr:`~torch.Tensor.dtype` are selected with the below table.
|
| 1318 |
+
equal_nan (Union[bool, str]): If ``True``, two ``NaN`` values will be considered equal.
|
| 1319 |
+
check_device (bool): If ``True`` (default), asserts that corresponding tensors are on the same
|
| 1320 |
+
:attr:`~torch.Tensor.device`. If this check is disabled, tensors on different
|
| 1321 |
+
:attr:`~torch.Tensor.device`'s are moved to the CPU before being compared.
|
| 1322 |
+
check_dtype (bool): If ``True`` (default), asserts that corresponding tensors have the same ``dtype``. If this
|
| 1323 |
+
check is disabled, tensors with different ``dtype``'s are promoted to a common ``dtype`` (according to
|
| 1324 |
+
:func:`torch.promote_types`) before being compared.
|
| 1325 |
+
check_layout (bool): If ``True`` (default), asserts that corresponding tensors have the same ``layout``. If this
|
| 1326 |
+
check is disabled, tensors with different ``layout``'s are converted to strided tensors before being
|
| 1327 |
+
compared.
|
| 1328 |
+
check_stride (bool): If ``True`` and corresponding tensors are strided, asserts that they have the same stride.
|
| 1329 |
+
msg (Optional[Union[str, Callable[[str], str]]]): Optional error message to use in case a failure occurs during
|
| 1330 |
+
the comparison. Can also passed as callable in which case it will be called with the generated message and
|
| 1331 |
+
should return the new message.
|
| 1332 |
+
|
| 1333 |
+
Raises:
|
| 1334 |
+
ValueError: If no :class:`torch.Tensor` can be constructed from an input.
|
| 1335 |
+
ValueError: If only ``rtol`` or ``atol`` is specified.
|
| 1336 |
+
AssertionError: If corresponding inputs are not Python scalars and are not directly related.
|
| 1337 |
+
AssertionError: If ``allow_subclasses`` is ``False``, but corresponding inputs are not Python scalars and have
|
| 1338 |
+
different types.
|
| 1339 |
+
AssertionError: If the inputs are :class:`~collections.abc.Sequence`'s, but their length does not match.
|
| 1340 |
+
AssertionError: If the inputs are :class:`~collections.abc.Mapping`'s, but their set of keys do not match.
|
| 1341 |
+
AssertionError: If corresponding tensors do not have the same :attr:`~torch.Tensor.shape`.
|
| 1342 |
+
AssertionError: If ``check_layout`` is ``True``, but corresponding tensors do not have the same
|
| 1343 |
+
:attr:`~torch.Tensor.layout`.
|
| 1344 |
+
AssertionError: If only one of corresponding tensors is quantized.
|
| 1345 |
+
AssertionError: If corresponding tensors are quantized, but have different :meth:`~torch.Tensor.qscheme`'s.
|
| 1346 |
+
AssertionError: If ``check_device`` is ``True``, but corresponding tensors are not on the same
|
| 1347 |
+
:attr:`~torch.Tensor.device`.
|
| 1348 |
+
AssertionError: If ``check_dtype`` is ``True``, but corresponding tensors do not have the same ``dtype``.
|
| 1349 |
+
AssertionError: If ``check_stride`` is ``True``, but corresponding strided tensors do not have the same stride.
|
| 1350 |
+
AssertionError: If the values of corresponding tensors are not close according to the definition above.
|
| 1351 |
+
|
| 1352 |
+
The following table displays the default ``rtol`` and ``atol`` for different ``dtype``'s. In case of mismatching
|
| 1353 |
+
``dtype``'s, the maximum of both tolerances is used.
|
| 1354 |
+
|
| 1355 |
+
+---------------------------+------------+----------+
|
| 1356 |
+
| ``dtype`` | ``rtol`` | ``atol`` |
|
| 1357 |
+
+===========================+============+==========+
|
| 1358 |
+
| :attr:`~torch.float16` | ``1e-3`` | ``1e-5`` |
|
| 1359 |
+
+---------------------------+------------+----------+
|
| 1360 |
+
| :attr:`~torch.bfloat16` | ``1.6e-2`` | ``1e-5`` |
|
| 1361 |
+
+---------------------------+------------+----------+
|
| 1362 |
+
| :attr:`~torch.float32` | ``1.3e-6`` | ``1e-5`` |
|
| 1363 |
+
+---------------------------+------------+----------+
|
| 1364 |
+
| :attr:`~torch.float64` | ``1e-7`` | ``1e-7`` |
|
| 1365 |
+
+---------------------------+------------+----------+
|
| 1366 |
+
| :attr:`~torch.complex32` | ``1e-3`` | ``1e-5`` |
|
| 1367 |
+
+---------------------------+------------+----------+
|
| 1368 |
+
| :attr:`~torch.complex64` | ``1.3e-6`` | ``1e-5`` |
|
| 1369 |
+
+---------------------------+------------+----------+
|
| 1370 |
+
| :attr:`~torch.complex128` | ``1e-7`` | ``1e-7`` |
|
| 1371 |
+
+---------------------------+------------+----------+
|
| 1372 |
+
| :attr:`~torch.quint8` | ``1.3e-6`` | ``1e-5`` |
|
| 1373 |
+
+---------------------------+------------+----------+
|
| 1374 |
+
| :attr:`~torch.quint2x4` | ``1.3e-6`` | ``1e-5`` |
|
| 1375 |
+
+---------------------------+------------+----------+
|
| 1376 |
+
| :attr:`~torch.quint4x2` | ``1.3e-6`` | ``1e-5`` |
|
| 1377 |
+
+---------------------------+------------+----------+
|
| 1378 |
+
| :attr:`~torch.qint8` | ``1.3e-6`` | ``1e-5`` |
|
| 1379 |
+
+---------------------------+------------+----------+
|
| 1380 |
+
| :attr:`~torch.qint32` | ``1.3e-6`` | ``1e-5`` |
|
| 1381 |
+
+---------------------------+------------+----------+
|
| 1382 |
+
| other | ``0.0`` | ``0.0`` |
|
| 1383 |
+
+---------------------------+------------+----------+
|
| 1384 |
+
|
| 1385 |
+
.. note::
|
| 1386 |
+
|
| 1387 |
+
:func:`~torch.testing.assert_close` is highly configurable with strict default settings. Users are encouraged
|
| 1388 |
+
to :func:`~functools.partial` it to fit their use case. For example, if an equality check is needed, one might
|
| 1389 |
+
define an ``assert_equal`` that uses zero tolerances for every ``dtype`` by default:
|
| 1390 |
+
|
| 1391 |
+
>>> import functools
|
| 1392 |
+
>>> assert_equal = functools.partial(torch.testing.assert_close, rtol=0, atol=0)
|
| 1393 |
+
>>> assert_equal(1e-9, 1e-10)
|
| 1394 |
+
Traceback (most recent call last):
|
| 1395 |
+
...
|
| 1396 |
+
AssertionError: Scalars are not equal!
|
| 1397 |
+
<BLANKLINE>
|
| 1398 |
+
Expected 1e-10 but got 1e-09.
|
| 1399 |
+
Absolute difference: 9.000000000000001e-10
|
| 1400 |
+
Relative difference: 9.0
|
| 1401 |
+
|
| 1402 |
+
Examples:
|
| 1403 |
+
>>> # tensor to tensor comparison
|
| 1404 |
+
>>> expected = torch.tensor([1e0, 1e-1, 1e-2])
|
| 1405 |
+
>>> actual = torch.acos(torch.cos(expected))
|
| 1406 |
+
>>> torch.testing.assert_close(actual, expected)
|
| 1407 |
+
|
| 1408 |
+
>>> # scalar to scalar comparison
|
| 1409 |
+
>>> import math
|
| 1410 |
+
>>> expected = math.sqrt(2.0)
|
| 1411 |
+
>>> actual = 2.0 / math.sqrt(2.0)
|
| 1412 |
+
>>> torch.testing.assert_close(actual, expected)
|
| 1413 |
+
|
| 1414 |
+
>>> # numpy array to numpy array comparison
|
| 1415 |
+
>>> import numpy as np
|
| 1416 |
+
>>> expected = np.array([1e0, 1e-1, 1e-2])
|
| 1417 |
+
>>> actual = np.arccos(np.cos(expected))
|
| 1418 |
+
>>> torch.testing.assert_close(actual, expected)
|
| 1419 |
+
|
| 1420 |
+
>>> # sequence to sequence comparison
|
| 1421 |
+
>>> import numpy as np
|
| 1422 |
+
>>> # The types of the sequences do not have to match. They only have to have the same
|
| 1423 |
+
>>> # length and their elements have to match.
|
| 1424 |
+
>>> expected = [torch.tensor([1.0]), 2.0, np.array(3.0)]
|
| 1425 |
+
>>> actual = tuple(expected)
|
| 1426 |
+
>>> torch.testing.assert_close(actual, expected)
|
| 1427 |
+
|
| 1428 |
+
>>> # mapping to mapping comparison
|
| 1429 |
+
>>> from collections import OrderedDict
|
| 1430 |
+
>>> import numpy as np
|
| 1431 |
+
>>> foo = torch.tensor(1.0)
|
| 1432 |
+
>>> bar = 2.0
|
| 1433 |
+
>>> baz = np.array(3.0)
|
| 1434 |
+
>>> # The types and a possible ordering of mappings do not have to match. They only
|
| 1435 |
+
>>> # have to have the same set of keys and their elements have to match.
|
| 1436 |
+
>>> expected = OrderedDict([("foo", foo), ("bar", bar), ("baz", baz)])
|
| 1437 |
+
>>> actual = {"baz": baz, "bar": bar, "foo": foo}
|
| 1438 |
+
>>> torch.testing.assert_close(actual, expected)
|
| 1439 |
+
|
| 1440 |
+
>>> expected = torch.tensor([1.0, 2.0, 3.0])
|
| 1441 |
+
>>> actual = expected.clone()
|
| 1442 |
+
>>> # By default, directly related instances can be compared
|
| 1443 |
+
>>> torch.testing.assert_close(torch.nn.Parameter(actual), expected)
|
| 1444 |
+
>>> # This check can be made more strict with allow_subclasses=False
|
| 1445 |
+
>>> torch.testing.assert_close(
|
| 1446 |
+
... torch.nn.Parameter(actual), expected, allow_subclasses=False
|
| 1447 |
+
... )
|
| 1448 |
+
Traceback (most recent call last):
|
| 1449 |
+
...
|
| 1450 |
+
TypeError: No comparison pair was able to handle inputs of type
|
| 1451 |
+
<class 'torch.nn.parameter.Parameter'> and <class 'torch.Tensor'>.
|
| 1452 |
+
>>> # If the inputs are not directly related, they are never considered close
|
| 1453 |
+
>>> torch.testing.assert_close(actual.numpy(), expected)
|
| 1454 |
+
Traceback (most recent call last):
|
| 1455 |
+
...
|
| 1456 |
+
TypeError: No comparison pair was able to handle inputs of type <class 'numpy.ndarray'>
|
| 1457 |
+
and <class 'torch.Tensor'>.
|
| 1458 |
+
>>> # Exceptions to these rules are Python scalars. They can be checked regardless of
|
| 1459 |
+
>>> # their type if check_dtype=False.
|
| 1460 |
+
>>> torch.testing.assert_close(1.0, 1, check_dtype=False)
|
| 1461 |
+
|
| 1462 |
+
>>> # NaN != NaN by default.
|
| 1463 |
+
>>> expected = torch.tensor(float("Nan"))
|
| 1464 |
+
>>> actual = expected.clone()
|
| 1465 |
+
>>> torch.testing.assert_close(actual, expected)
|
| 1466 |
+
Traceback (most recent call last):
|
| 1467 |
+
...
|
| 1468 |
+
AssertionError: Scalars are not close!
|
| 1469 |
+
<BLANKLINE>
|
| 1470 |
+
Expected nan but got nan.
|
| 1471 |
+
Absolute difference: nan (up to 1e-05 allowed)
|
| 1472 |
+
Relative difference: nan (up to 1.3e-06 allowed)
|
| 1473 |
+
>>> torch.testing.assert_close(actual, expected, equal_nan=True)
|
| 1474 |
+
|
| 1475 |
+
>>> expected = torch.tensor([1.0, 2.0, 3.0])
|
| 1476 |
+
>>> actual = torch.tensor([1.0, 4.0, 5.0])
|
| 1477 |
+
>>> # The default error message can be overwritten.
|
| 1478 |
+
>>> torch.testing.assert_close(actual, expected, msg="Argh, the tensors are not close!")
|
| 1479 |
+
Traceback (most recent call last):
|
| 1480 |
+
...
|
| 1481 |
+
AssertionError: Argh, the tensors are not close!
|
| 1482 |
+
>>> # If msg is a callable, it can be used to augment the generated message with
|
| 1483 |
+
>>> # extra information
|
| 1484 |
+
>>> torch.testing.assert_close(
|
| 1485 |
+
... actual, expected, msg=lambda msg: f"Header\n\n{msg}\n\nFooter"
|
| 1486 |
+
... )
|
| 1487 |
+
Traceback (most recent call last):
|
| 1488 |
+
...
|
| 1489 |
+
AssertionError: Header
|
| 1490 |
+
<BLANKLINE>
|
| 1491 |
+
Tensor-likes are not close!
|
| 1492 |
+
<BLANKLINE>
|
| 1493 |
+
Mismatched elements: 2 / 3 (66.7%)
|
| 1494 |
+
Greatest absolute difference: 2.0 at index (1,) (up to 1e-05 allowed)
|
| 1495 |
+
Greatest relative difference: 1.0 at index (1,) (up to 1.3e-06 allowed)
|
| 1496 |
+
<BLANKLINE>
|
| 1497 |
+
Footer
|
| 1498 |
+
"""
|
| 1499 |
+
# Hide this function from `pytest`'s traceback
|
| 1500 |
+
__tracebackhide__ = True
|
| 1501 |
+
|
| 1502 |
+
error_metas = not_close_error_metas(
|
| 1503 |
+
actual,
|
| 1504 |
+
expected,
|
| 1505 |
+
pair_types=(
|
| 1506 |
+
NonePair,
|
| 1507 |
+
BooleanPair,
|
| 1508 |
+
NumberPair,
|
| 1509 |
+
TensorLikePair,
|
| 1510 |
+
),
|
| 1511 |
+
allow_subclasses=allow_subclasses,
|
| 1512 |
+
rtol=rtol,
|
| 1513 |
+
atol=atol,
|
| 1514 |
+
equal_nan=equal_nan,
|
| 1515 |
+
check_device=check_device,
|
| 1516 |
+
check_dtype=check_dtype,
|
| 1517 |
+
check_layout=check_layout,
|
| 1518 |
+
check_stride=check_stride,
|
| 1519 |
+
msg=msg,
|
| 1520 |
+
)
|
| 1521 |
+
|
| 1522 |
+
if error_metas:
|
| 1523 |
+
# TODO: compose all metas into one AssertionError
|
| 1524 |
+
raise error_metas[0].to_error(msg)
|
| 1525 |
+
|
| 1526 |
+
|
| 1527 |
+
@deprecated(
    "`torch.testing.assert_allclose()` is deprecated since 1.12 and will be removed in a future release. "
    "Please use `torch.testing.assert_close()` instead. "
    "You can find detailed upgrade instructions in https://github.com/pytorch/pytorch/issues/61844.",
    category=FutureWarning,
)
def assert_allclose(
    actual: Any,
    expected: Any,
    rtol: Optional[float] = None,
    atol: Optional[float] = None,
    equal_nan: bool = True,
    msg: str = "",
) -> None:
    """
    .. warning::

        :func:`torch.testing.assert_allclose` is deprecated since ``1.12`` and will be removed in a future release.
        Please use :func:`torch.testing.assert_close` instead. You can find detailed upgrade instructions
        `here <https://github.com/pytorch/pytorch/issues/61844>`_.
    """
    # Legacy behavior: coerce non-tensor inputs, with `expected` following
    # the dtype of `actual`.
    if not isinstance(actual, torch.Tensor):
        actual = torch.tensor(actual)
    if not isinstance(expected, torch.Tensor):
        expected = torch.tensor(expected, dtype=actual.dtype)

    # When neither tolerance is given, fall back to the historical
    # per-dtype defaults that `assert_allclose` used before deprecation.
    if rtol is None and atol is None:
        legacy_precisions = {
            torch.float16: (1e-3, 1e-3),
            torch.float32: (1e-4, 1e-5),
            torch.float64: (1e-5, 1e-8),
        }
        rtol, atol = default_tolerances(
            actual, expected, dtype_precisions=legacy_precisions
        )

    # Delegate to the replacement API with compatibility flags matching the
    # old semantics (no dtype/stride checks; empty msg means default message).
    torch.testing.assert_close(
        actual,
        expected,
        rtol=rtol,
        atol=atol,
        equal_nan=equal_nan,
        check_device=True,
        check_dtype=False,
        check_stride=False,
        msg=msg or None,
    )
|
valley/lib/python3.10/site-packages/torch/testing/_creation.py
ADDED
|
@@ -0,0 +1,268 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
This module contains tensor creation utilities.
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
import collections.abc
|
| 6 |
+
import math
|
| 7 |
+
import warnings
|
| 8 |
+
from typing import cast, List, Optional, Tuple, Union
|
| 9 |
+
|
| 10 |
+
import torch
|
| 11 |
+
|
| 12 |
+
_INTEGRAL_TYPES = [
|
| 13 |
+
torch.uint8,
|
| 14 |
+
torch.int8,
|
| 15 |
+
torch.int16,
|
| 16 |
+
torch.int32,
|
| 17 |
+
torch.int64,
|
| 18 |
+
torch.uint16,
|
| 19 |
+
torch.uint32,
|
| 20 |
+
torch.uint64,
|
| 21 |
+
]
|
| 22 |
+
_FLOATING_TYPES = [torch.float16, torch.bfloat16, torch.float32, torch.float64]
|
| 23 |
+
_FLOATING_8BIT_TYPES = [
|
| 24 |
+
torch.float8_e4m3fn,
|
| 25 |
+
torch.float8_e5m2,
|
| 26 |
+
torch.float8_e4m3fnuz,
|
| 27 |
+
torch.float8_e5m2fnuz,
|
| 28 |
+
]
|
| 29 |
+
_COMPLEX_TYPES = [torch.complex32, torch.complex64, torch.complex128]
|
| 30 |
+
_BOOLEAN_OR_INTEGRAL_TYPES = [torch.bool, *_INTEGRAL_TYPES]
|
| 31 |
+
_FLOATING_OR_COMPLEX_TYPES = [*_FLOATING_TYPES, *_COMPLEX_TYPES]
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def _uniform_random_(t: torch.Tensor, low: float, high: float) -> torch.Tensor:
|
| 35 |
+
# uniform_ requires to-from <= std::numeric_limits<scalar_t>::max()
|
| 36 |
+
# Work around this by scaling the range before and after the PRNG
|
| 37 |
+
if high - low >= torch.finfo(t.dtype).max:
|
| 38 |
+
return t.uniform_(low / 2, high / 2).mul_(2)
|
| 39 |
+
else:
|
| 40 |
+
return t.uniform_(low, high)
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def make_tensor(
    *shape: Union[int, torch.Size, List[int], Tuple[int, ...]],
    dtype: torch.dtype,
    device: Union[str, torch.device],
    low: Optional[float] = None,
    high: Optional[float] = None,
    requires_grad: bool = False,
    noncontiguous: bool = False,
    exclude_zero: bool = False,
    memory_format: Optional[torch.memory_format] = None,
) -> torch.Tensor:
    r"""Creates a tensor with the given :attr:`shape`, :attr:`device`, and :attr:`dtype`, and filled with
    values uniformly drawn from ``[low, high)``.

    If :attr:`low` or :attr:`high` are specified and are outside the range of the :attr:`dtype`'s representable
    finite values then they are clamped to the lowest or highest representable finite value, respectively.
    If ``None``, then the following table describes the default values for :attr:`low` and :attr:`high`,
    which depend on :attr:`dtype`.

    +---------------------------+------------+----------+
    | ``dtype``                 | ``low``    | ``high`` |
    +===========================+============+==========+
    | boolean type              | ``0``      | ``2``    |
    +---------------------------+------------+----------+
    | unsigned integral type    | ``0``      | ``10``   |
    +---------------------------+------------+----------+
    | signed integral types     | ``-9``     | ``10``   |
    +---------------------------+------------+----------+
    | floating types            | ``-9``     | ``9``    |
    +---------------------------+------------+----------+
    | complex types             | ``-9``     | ``9``    |
    +---------------------------+------------+----------+

    Args:
        shape (Tuple[int, ...]): Single integer or a sequence of integers defining the shape of the output tensor.
        dtype (:class:`torch.dtype`): The data type of the returned tensor.
        device (Union[str, torch.device]): The device of the returned tensor.
        low (Optional[Number]): Sets the lower limit (inclusive) of the given range. If a number is provided it is
            clamped to the least representable finite value of the given dtype. When ``None`` (default),
            this value is determined based on the :attr:`dtype` (see the table above). Default: ``None``.
        high (Optional[Number]): Sets the upper limit (exclusive) of the given range. If a number is provided it is
            clamped to the greatest representable finite value of the given dtype. When ``None`` (default) this value
            is determined based on the :attr:`dtype` (see the table above). Default: ``None``.

            .. deprecated:: 2.1

                Passing ``low==high`` to :func:`~torch.testing.make_tensor` for floating or complex types is deprecated
                since 2.1 and will be removed in 2.3. Use :func:`torch.full` instead.

        requires_grad (Optional[bool]): If autograd should record operations on the returned tensor. Default: ``False``.
        noncontiguous (Optional[bool]): If `True`, the returned tensor will be noncontiguous. This argument is
            ignored if the constructed tensor has fewer than two elements. Mutually exclusive with ``memory_format``.
        exclude_zero (Optional[bool]): If ``True`` then zeros are replaced with the dtype's small positive value
            depending on the :attr:`dtype`. For bool and integer types zero is replaced with one. For floating
            point types it is replaced with the dtype's smallest positive normal number (the "tiny" value of the
            :attr:`dtype`'s :func:`~torch.finfo` object), and for complex types it is replaced with a complex number
            whose real and imaginary parts are both the smallest positive normal number representable by the complex
            type. Default ``False``.
        memory_format (Optional[torch.memory_format]): The memory format of the returned tensor. Mutually exclusive
            with ``noncontiguous``.

    Raises:
        ValueError: If ``requires_grad=True`` is passed for integral `dtype`
        ValueError: If ``low >= high``.
        ValueError: If either :attr:`low` or :attr:`high` is ``nan``.
        ValueError: If both :attr:`noncontiguous` and :attr:`memory_format` are passed.
        TypeError: If :attr:`dtype` isn't supported by this function.

    Examples:
        >>> # xdoctest: +SKIP
        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA)
        >>> from torch.testing import make_tensor
        >>> # Creates a float tensor with values in [-1, 1)
        >>> make_tensor((3,), device='cpu', dtype=torch.float32, low=-1, high=1)
        >>> # xdoctest: +SKIP
        tensor([ 0.1205, 0.2282, -0.6380])
        >>> # Creates a bool tensor on CUDA
        >>> make_tensor((2, 2), device='cuda', dtype=torch.bool)
        tensor([[False, False],
                [False, True]], device='cuda:0')
    """

    def modify_low_high(
        low: Optional[float],
        high: Optional[float],
        *,
        lowest_inclusive: float,
        highest_exclusive: float,
        default_low: float,
        default_high: float,
    ) -> Tuple[float, float]:
        """
        Modifies (and raises ValueError when appropriate) low and high values given by the user (input_low, input_high)
        if required.
        """

        def clamp(a: float, l: float, h: float) -> float:
            return min(max(a, l), h)

        # Fill in per-dtype defaults when the caller did not specify bounds.
        low = low if low is not None else default_low
        high = high if high is not None else default_high

        if any(isinstance(value, float) and math.isnan(value) for value in [low, high]):
            raise ValueError(
                f"`low` and `high` cannot be NaN, but got {low=} and {high=}"
            )
        elif low == high and dtype in _FLOATING_OR_COMPLEX_TYPES:
            # Degenerate interval: still allowed for backward compatibility,
            # but deprecated in favor of `torch.full`.
            warnings.warn(
                "Passing `low==high` to `torch.testing.make_tensor` for floating or complex types "
                "is deprecated since 2.1 and will be removed in 2.3. "
                "Use `torch.full(...)` instead.",
                FutureWarning,
                stacklevel=3,
            )
        elif low >= high:
            raise ValueError(f"`low` must be less than `high`, but got {low} >= {high}")
        elif high < lowest_inclusive or low >= highest_exclusive:
            raise ValueError(
                f"The value interval specified by `low` and `high` is [{low}, {high}), "
                f"but {dtype} only supports [{lowest_inclusive}, {highest_exclusive})"
            )

        # Clamp user-supplied bounds into the dtype's representable range.
        low = clamp(low, lowest_inclusive, highest_exclusive)
        high = clamp(high, lowest_inclusive, highest_exclusive)

        if dtype in _BOOLEAN_OR_INTEGRAL_TYPES:
            # 1. `low` is ceiled to avoid creating values smaller than `low` and thus outside the specified interval
            # 2. Following the same reasoning as for 1., `high` should be floored. However, the higher bound of
            #    `torch.randint` is exclusive, and thus we need to ceil here as well.
            return math.ceil(low), math.ceil(high)

        return low, high

    # Accept either `make_tensor(2, 3)` or `make_tensor((2, 3))` / `make_tensor([2, 3])`.
    if len(shape) == 1 and isinstance(shape[0], collections.abc.Sequence):
        shape = shape[0]  # type: ignore[assignment]
    shape = cast(Tuple[int, ...], tuple(shape))

    if noncontiguous and memory_format is not None:
        raise ValueError(
            f"The parameters `noncontiguous` and `memory_format` are mutually exclusive, "
            f"but got {noncontiguous=} and {memory_format=}"
        )

    if requires_grad and dtype in _BOOLEAN_OR_INTEGRAL_TYPES:
        raise ValueError(
            f"`requires_grad=True` is not supported for boolean and integral dtypes, but got {dtype=}"
        )

    # Dispatch on dtype category: bool / integral via `torch.randint`,
    # floating & complex via in-place uniform sampling, 8-bit floats via a
    # float32 intermediate.
    if dtype is torch.bool:
        low, high = cast(
            Tuple[int, int],
            modify_low_high(
                low,
                high,
                lowest_inclusive=0,
                highest_exclusive=2,
                default_low=0,
                default_high=2,
            ),
        )
        result = torch.randint(low, high, shape, device=device, dtype=dtype)
    elif dtype in _BOOLEAN_OR_INTEGRAL_TYPES:
        low, high = cast(
            Tuple[int, int],
            modify_low_high(
                low,
                high,
                lowest_inclusive=torch.iinfo(dtype).min,
                highest_exclusive=torch.iinfo(dtype).max
                # In theory, `highest_exclusive` should always be the maximum value + 1. However, `torch.randint`
                # internally converts the bounds to an int64 and would overflow. In other words: `torch.randint` cannot
                # sample 2**63 - 1, i.e. the maximum value of `torch.int64` and we need to account for that here.
                + (1 if dtype is not torch.int64 else 0),
                # This is incorrect for `torch.uint8`, but since we clamp to `lowest`, i.e. 0 for `torch.uint8`,
                # _after_ we use the default value, we don't need to special case it here
                default_low=-9,
                default_high=10,
            ),
        )
        result = torch.randint(low, high, shape, device=device, dtype=dtype)
    elif dtype in _FLOATING_OR_COMPLEX_TYPES:
        low, high = modify_low_high(
            low,
            high,
            lowest_inclusive=torch.finfo(dtype).min,
            highest_exclusive=torch.finfo(dtype).max,
            default_low=-9,
            default_high=9,
        )
        result = torch.empty(shape, device=device, dtype=dtype)
        # For complex dtypes, sample real and imaginary parts independently
        # by viewing the tensor as real.
        _uniform_random_(
            torch.view_as_real(result) if dtype in _COMPLEX_TYPES else result, low, high
        )
    elif dtype in _FLOATING_8BIT_TYPES:
        low, high = modify_low_high(
            low,
            high,
            lowest_inclusive=torch.finfo(dtype).min,
            highest_exclusive=torch.finfo(dtype).max,
            default_low=-9,
            default_high=9,
        )
        # Sample in float32 and convert, since uniform_ is not implemented
        # for the 8-bit float formats.
        result = torch.empty(shape, device=device, dtype=torch.float32)
        _uniform_random_(result, low, high)
        result = result.to(dtype)
    else:
        raise TypeError(
            f"The requested dtype '{dtype}' is not supported by torch.testing.make_tensor()."
            " To request support, file an issue at: https://github.com/pytorch/pytorch/issues"
        )

    if noncontiguous and result.numel() > 1:
        # Doubling the last dimension and slicing every other element yields
        # the same values with a non-unit stride, i.e. a noncontiguous tensor.
        result = torch.repeat_interleave(result, 2, dim=-1)
        result = result[..., ::2]
    elif memory_format is not None:
        result = result.clone(memory_format=memory_format)

    if exclude_zero:
        result[result == 0] = (
            1 if dtype in _BOOLEAN_OR_INTEGRAL_TYPES else torch.finfo(dtype).tiny
        )

    # requires_grad is only meaningful (and only set) for autograd-capable dtypes.
    if dtype in _FLOATING_OR_COMPLEX_TYPES:
        result.requires_grad = requires_grad

    return result
|
valley/lib/python3.10/site-packages/torch/testing/_internal/__init__.py
ADDED
|
File without changes
|
valley/lib/python3.10/site-packages/torch/testing/_internal/autocast_test_lists.py
ADDED
|
@@ -0,0 +1,370 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: ignore-errors
|
| 2 |
+
|
| 3 |
+
import torch
|
| 4 |
+
from torch.testing._internal.common_utils import TEST_WITH_ROCM
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class AutocastTestLists:
|
| 8 |
+
def _rnn_cell_args(self, n, num_chunks, is_lstm, dev, dtype):
|
| 9 |
+
input = (torch.randn((n, n), device=dev, dtype=torch.float32),)
|
| 10 |
+
|
| 11 |
+
hx = ((torch.randn((n, n), device=dev, dtype=torch.float32),
|
| 12 |
+
torch.randn((n, n), device=dev, dtype=torch.float32)) if is_lstm else
|
| 13 |
+
torch.randn((n, n), device=dev, dtype=torch.float32),)
|
| 14 |
+
|
| 15 |
+
weights = (torch.randn((num_chunks * n, n), device=dev, dtype=torch.float32), # weight_ih
|
| 16 |
+
torch.randn((num_chunks * n, n), device=dev, dtype=torch.float32), # weight_hh
|
| 17 |
+
torch.randn((num_chunks * n), device=dev, dtype=torch.float32), # bias_ih
|
| 18 |
+
torch.randn((num_chunks * n), device=dev, dtype=torch.float32)) # bias_hh
|
| 19 |
+
|
| 20 |
+
# returns args as a tuple
|
| 21 |
+
return input + hx + weights
|
| 22 |
+
|
| 23 |
+
# Supplies ops and arguments for test_autocast_* in test/test_cuda.py
|
| 24 |
+
def __init__(self, dev):
|
| 25 |
+
super().__init__()
|
| 26 |
+
n = 8
|
| 27 |
+
# Utility arguments, created as one-element tuples
|
| 28 |
+
pointwise0_fp16 = (torch.randn(n, dtype=torch.float16, device=dev),)
|
| 29 |
+
pointwise1_fp16 = (torch.randn(n, dtype=torch.float16, device=dev),)
|
| 30 |
+
pointwise2_fp16 = (torch.randn(n, dtype=torch.float16, device=dev),)
|
| 31 |
+
mat0_fp16 = (torch.randn((n, n), dtype=torch.float16, device=dev),)
|
| 32 |
+
mat1_fp16 = (torch.randn((n, n), dtype=torch.float16, device=dev),)
|
| 33 |
+
mat2_fp16 = (torch.randn((n, n), dtype=torch.float16, device=dev),)
|
| 34 |
+
|
| 35 |
+
dimsets = ((n, n, n), (n, n, n, n), (n, n, n, n, n))
|
| 36 |
+
conv_args_fp32 = [(torch.randn(dimset, dtype=torch.float32, device=dev),
|
| 37 |
+
torch.randn(dimset, dtype=torch.float32, device=dev))
|
| 38 |
+
for dimset in dimsets]
|
| 39 |
+
bias_fp32 = (torch.randn((n,), dtype=torch.float32, device=dev),)
|
| 40 |
+
element0_fp32 = (torch.randn(1, dtype=torch.float32, device=dev),)
|
| 41 |
+
pointwise0_fp32 = (torch.randn(n, dtype=torch.float32, device=dev),)
|
| 42 |
+
pointwise1_fp32 = (torch.randn(n, dtype=torch.float32, device=dev),)
|
| 43 |
+
mat0_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),)
|
| 44 |
+
mat1_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),)
|
| 45 |
+
mat2_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),)
|
| 46 |
+
mat3_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),)
|
| 47 |
+
|
| 48 |
+
# The lists below organize ops that autocast needs to test.
|
| 49 |
+
# self.list_name corresponds to test_autocast_list_name in test/test_cuda.py.
|
| 50 |
+
# Each op is associated with a tuple of valid arguments.
|
| 51 |
+
# In addition, cudnn conv ops are not supported on ROCm and hence will
|
| 52 |
+
# be skipped by passing TEST_WITH_ROCM flag to those ops in self.torch_fp16 list.
|
| 53 |
+
|
| 54 |
+
# Some ops implement built-in type promotion. These don't need autocasting,
|
| 55 |
+
# but autocasting relies on their promotion, so we include tests to double-check.
|
| 56 |
+
self.torch_expect_builtin_promote = [
|
| 57 |
+
("eq", pointwise0_fp32 + pointwise1_fp16, torch.bool),
|
| 58 |
+
("ge", pointwise0_fp32 + pointwise1_fp16, torch.bool),
|
| 59 |
+
("gt", pointwise0_fp32 + pointwise1_fp16, torch.bool),
|
| 60 |
+
("le", pointwise0_fp32 + pointwise1_fp16, torch.bool),
|
| 61 |
+
("lt", pointwise0_fp32 + pointwise1_fp16, torch.bool),
|
| 62 |
+
("ne", pointwise0_fp32 + pointwise1_fp16, torch.bool),
|
| 63 |
+
("add", pointwise0_fp32 + pointwise1_fp16, torch.float32),
|
| 64 |
+
("div", pointwise0_fp32 + pointwise1_fp16, torch.float32),
|
| 65 |
+
("mul", pointwise0_fp32 + pointwise1_fp16, torch.float32),
|
| 66 |
+
("cat", (pointwise0_fp16 + pointwise1_fp32,), torch.float32),
|
| 67 |
+
("equal", pointwise0_fp32 + pointwise1_fp16, torch.float32),
|
| 68 |
+
("stack", (pointwise0_fp16 + pointwise1_fp32,), torch.float32),
|
| 69 |
+
]
|
| 70 |
+
self.methods_expect_builtin_promote = [
|
| 71 |
+
("__eq__", pointwise0_fp32 + pointwise1_fp16, torch.bool),
|
| 72 |
+
("__ge__", pointwise0_fp32 + pointwise1_fp16, torch.bool),
|
| 73 |
+
("__gt__", pointwise0_fp32 + pointwise1_fp16, torch.bool),
|
| 74 |
+
("__le__", pointwise0_fp32 + pointwise1_fp16, torch.bool),
|
| 75 |
+
("__lt__", pointwise0_fp32 + pointwise1_fp16, torch.bool),
|
| 76 |
+
("__ne__", pointwise0_fp32 + pointwise1_fp16, torch.bool),
|
| 77 |
+
("__add__", pointwise0_fp32 + pointwise1_fp16, torch.float32),
|
| 78 |
+
("__div__", pointwise0_fp32 + pointwise1_fp16, torch.float32),
|
| 79 |
+
("__mul__", pointwise0_fp32 + pointwise1_fp16, torch.float32),
|
| 80 |
+
]
|
| 81 |
+
|
| 82 |
+
# The remaining lists organize ops that autocast treats explicitly.
|
| 83 |
+
self.torch_fp16 = [
|
| 84 |
+
# deprecated _convolution
|
| 85 |
+
("_convolution", conv_args_fp32[1] + bias_fp32 + ((1, 1), (0, 0), (1, 1), False,
|
| 86 |
+
(0, 0), 1, False, True, True)),
|
| 87 |
+
# the current _convolution
|
| 88 |
+
("_convolution", conv_args_fp32[1] + bias_fp32 + ((1, 1), (0, 0), (1, 1), False,
|
| 89 |
+
(0, 0), 1, False, True, True, True)),
|
| 90 |
+
("conv1d", conv_args_fp32[0]),
|
| 91 |
+
("conv2d", conv_args_fp32[1]),
|
| 92 |
+
("conv3d", conv_args_fp32[2]),
|
| 93 |
+
("conv_tbc", conv_args_fp32[0] + bias_fp32),
|
| 94 |
+
("conv_transpose1d", conv_args_fp32[0]),
|
| 95 |
+
("conv_transpose2d", conv_args_fp32[1]),
|
| 96 |
+
("conv_transpose3d", conv_args_fp32[2]),
|
| 97 |
+
("convolution", conv_args_fp32[1] + bias_fp32 + ((1, 1), (0, 0), (1, 1), False, (0, 0), 1)),
|
| 98 |
+
("cudnn_convolution", conv_args_fp32[1] + ((0, 0), (1, 1), (1, 1), 1, False, True, True), TEST_WITH_ROCM),
|
| 99 |
+
("cudnn_convolution_transpose", conv_args_fp32[1] + ((0, 0), (0, 0), (1, 1),
|
| 100 |
+
(1, 1), 1, False, True, True), TEST_WITH_ROCM),
|
| 101 |
+
("prelu", pointwise0_fp32 + element0_fp32),
|
| 102 |
+
("addmm", mat1_fp32 + mat2_fp32 + mat3_fp32),
|
| 103 |
+
("addmv", pointwise0_fp32 + mat2_fp32 + pointwise1_fp32),
|
| 104 |
+
("addr", mat0_fp32 + pointwise0_fp32 + pointwise1_fp32),
|
| 105 |
+
("matmul", mat0_fp32 + mat1_fp32),
|
| 106 |
+
("einsum", "bkhd,bqhd->bqkh", mat0_fp32 + mat1_fp32),
|
| 107 |
+
("mm", mat0_fp32 + mat1_fp32),
|
| 108 |
+
("mv", mat0_fp32 + pointwise0_fp32),
|
| 109 |
+
("chain_matmul", mat0_fp32 + mat1_fp32 + mat2_fp32),
|
| 110 |
+
("addbmm", mat0_fp32 + (torch.randn((n, n, n), device=dev, dtype=torch.float32),
|
| 111 |
+
torch.randn((n, n, n), device=dev, dtype=torch.float32))),
|
| 112 |
+
("baddbmm", (torch.randn((n, n, n), device=dev, dtype=torch.float32),
|
| 113 |
+
torch.randn((n, n, n), device=dev, dtype=torch.float32),
|
| 114 |
+
torch.randn((n, n, n), device=dev, dtype=torch.float32))),
|
| 115 |
+
("bmm", (torch.randn((n, n, n), device=dev, dtype=torch.float32),
|
| 116 |
+
torch.randn((n, n, n), device=dev, dtype=torch.float32))),
|
| 117 |
+
# _thnn_fused_lstm_cell and _thnn_fused_gru_cell are not Python-exposed as far as I can tell.
|
| 118 |
+
# ("_thnn_fused_lstm_cell", mat0_fp32 + mat1_fp32 + mat2_fp32 + pointwise0_fp32 + pointwise1_fp32),
|
| 119 |
+
# ("_thnn_fused_gru_cell", mat0_fp32 + mat1_fp32 + mat2_fp32 + pointwise0_fp32 + pointwise1_fp32),
|
| 120 |
+
("lstm_cell", self._rnn_cell_args(n, num_chunks=4, is_lstm=True, dev=dev, dtype=torch.float32)),
|
| 121 |
+
("gru_cell", self._rnn_cell_args(n, num_chunks=3, is_lstm=False, dev=dev, dtype=torch.float32)),
|
| 122 |
+
("rnn_tanh_cell", self._rnn_cell_args(n, num_chunks=1, is_lstm=False, dev=dev, dtype=torch.float32)),
|
| 123 |
+
("rnn_relu_cell", self._rnn_cell_args(n, num_chunks=1, is_lstm=False, dev=dev, dtype=torch.float32)),
|
| 124 |
+
]
|
| 125 |
+
self.torch_fp32 = [
|
| 126 |
+
("acos", (pointwise0_fp16[0].clamp(-.9, 0.9),)),
|
| 127 |
+
("asin", (pointwise0_fp16[0].clamp(-.9, 0.9),)),
|
| 128 |
+
("cosh", pointwise0_fp16),
|
| 129 |
+
("erfinv", (pointwise0_fp16[0].clamp(-.9, .9),)),
|
| 130 |
+
("exp", pointwise0_fp16),
|
| 131 |
+
("expm1", pointwise0_fp16),
|
| 132 |
+
("log", (pointwise0_fp16[0].clamp(0.1, 100.0),)),
|
| 133 |
+
("log10", (pointwise0_fp16[0].clamp(0.1, 100.0),)),
|
| 134 |
+
("log2", (pointwise0_fp16[0].clamp(0.1, 100.0),)),
|
| 135 |
+
("log1p", (pointwise0_fp16[0].clamp(-0.9, 100.0),)),
|
| 136 |
+
("reciprocal", pointwise0_fp16),
|
| 137 |
+
("rsqrt", (pointwise0_fp16[0].clamp(0.0, 100.0),)),
|
| 138 |
+
("sinh", pointwise0_fp16),
|
| 139 |
+
("tan", (pointwise0_fp16[0].clamp(-3.1 / 2, 3.1 / 2),)),
|
| 140 |
+
("pow", ((pointwise0_fp16[0] + 1.).clamp(0.0, 100.0),) + pointwise1_fp16),
|
| 141 |
+
("pow", ((pointwise0_fp16[0] + 1.).clamp(0.0, 100.0),) + (1.7,)),
|
| 142 |
+
# ("pow", (1.7,) + pointwise0_fp16), # This variant has a backend, but is not documented in the API.
|
| 143 |
+
("softmax", pointwise0_fp16 + (0,)),
|
| 144 |
+
("log_softmax", pointwise0_fp16 + (0,)),
|
| 145 |
+
("layer_norm", pointwise0_fp16 + ((pointwise0_fp16[0].numel(),),)),
|
| 146 |
+
("group_norm", mat0_fp16 + (1,)),
|
| 147 |
+
("norm", pointwise0_fp16),
|
| 148 |
+
("norm", pointwise0_fp16, {"dim": 0}),
|
| 149 |
+
# these need magma
|
| 150 |
+
# ("norm", mat0_fp16, {"p": "nuc"}),
|
| 151 |
+
# ("norm", mat0_fp16, {"p": "nuc", "dim": 0}),
|
| 152 |
+
("norm", pointwise0_fp16, {"p": 1}),
|
| 153 |
+
("norm", pointwise0_fp16, {"p": 1, "dim": 0}),
|
| 154 |
+
("cosine_similarity", mat0_fp16 + mat1_fp16),
|
| 155 |
+
("poisson_nll_loss", mat0_fp16 + mat1_fp16 + (True, False, 1.e-8, torch.nn._reduction.get_enum('mean'))),
|
| 156 |
+
("cosine_embedding_loss", (torch.tensor([[1, 2, 3]], device=dev, dtype=torch.float16),
|
| 157 |
+
torch.tensor([[1, 3, 4]], device=dev, dtype=torch.float16),
|
| 158 |
+
torch.tensor([1], device=dev, dtype=torch.int))),
|
| 159 |
+
("hinge_embedding_loss", mat0_fp16 + (torch.ones(n, device=dev, dtype=torch.int),)),
|
| 160 |
+
("kl_div", mat0_fp16 + (torch.rand((n, n), device=dev, dtype=torch.float16),)),
|
| 161 |
+
("margin_ranking_loss", mat0_fp16 + mat1_fp16 + (torch.ones((n,), device=dev, dtype=torch.float16),)),
|
| 162 |
+
("triplet_margin_loss", mat0_fp16 + mat1_fp16 + mat2_fp16),
|
| 163 |
+
("binary_cross_entropy_with_logits", mat0_fp16 + (torch.rand((n, n), device=dev, dtype=torch.float16),)),
|
| 164 |
+
("cumprod", pointwise0_fp16 + (0,)),
|
| 165 |
+
("cumsum", pointwise0_fp16 + (0,)),
|
| 166 |
+
("dist", pointwise0_fp16 + pointwise1_fp16),
|
| 167 |
+
("pdist", mat0_fp16),
|
| 168 |
+
("cdist", mat0_fp16 + mat1_fp16),
|
| 169 |
+
("prod", pointwise0_fp16),
|
| 170 |
+
("prod", pointwise0_fp16 + (0,)),
|
| 171 |
+
("renorm", mat0_fp16 + (2, 0, 1.0)),
|
| 172 |
+
("sum", pointwise0_fp16),
|
| 173 |
+
("sum", mat0_fp16 + (1,)),
|
| 174 |
+
("logsumexp", mat0_fp16 + (1,)),
|
| 175 |
+
]
|
| 176 |
+
self.torch_need_autocast_promote = [
|
| 177 |
+
("addcdiv", pointwise0_fp32 + pointwise1_fp16 + (pointwise2_fp16[0].clamp(0.1, 100),)),
|
| 178 |
+
("addcmul", pointwise0_fp32 + pointwise1_fp16 + pointwise2_fp16),
|
| 179 |
+
("atan2", pointwise0_fp32 + (pointwise1_fp16[0].clamp(0.1, 100),)),
|
| 180 |
+
("bilinear", (torch.randn((1, 2), dtype=torch.float16, device=dev),
|
| 181 |
+
torch.randn((1, 2), dtype=torch.float32, device=dev),
|
| 182 |
+
torch.randn((1, 2, 2), dtype=torch.float16, device=dev),
|
| 183 |
+
torch.randn((1,), dtype=torch.float32, device=dev))),
|
| 184 |
+
("cross", (torch.randn(3, dtype=torch.float32, device=dev),
|
| 185 |
+
torch.randn(3, dtype=torch.float16, device=dev))),
|
| 186 |
+
("dot", pointwise0_fp16 + pointwise1_fp32),
|
| 187 |
+
("vdot", pointwise0_fp16 + pointwise1_fp32),
|
| 188 |
+
("grid_sampler", (torch.randn((2, 3, 33, 22), dtype=torch.float16, device=dev),
|
| 189 |
+
torch.randn((2, 22, 11, 2), dtype=torch.float32, device=dev),
|
| 190 |
+
0, 0, False)),
|
| 191 |
+
("index_put", pointwise0_fp32 + ((torch.tensor([1], device=dev, dtype=torch.long),),
|
| 192 |
+
torch.randn(1, device=dev, dtype=torch.float16))),
|
| 193 |
+
("index_put", pointwise0_fp16 + ((torch.tensor([1], device=dev, dtype=torch.long),),
|
| 194 |
+
torch.randn(1, device=dev, dtype=torch.float32))),
|
| 195 |
+
("tensordot", (torch.randn((2, 2, 2), dtype=torch.float32, device=dev),
|
| 196 |
+
torch.randn((2, 2, 2), dtype=torch.float16, device=dev))),
|
| 197 |
+
("scatter_add", (torch.zeros(2, 2, 2, dtype=torch.float32, device=dev),
|
| 198 |
+
0,
|
| 199 |
+
torch.randint(0, 2, (2, 2, 2), device=dev),
|
| 200 |
+
torch.randn((2, 2, 2), dtype=torch.float16, device=dev))),
|
| 201 |
+
("scatter_add", (torch.zeros(2, 2, 2, dtype=torch.float16, device=dev),
|
| 202 |
+
0,
|
| 203 |
+
torch.randint(0, 2, (2, 2, 2), device=dev),
|
| 204 |
+
torch.randn((2, 2, 2), dtype=torch.float32, device=dev))),
|
| 205 |
+
]
|
| 206 |
+
self.nn_fp16 = [
|
| 207 |
+
("linear", mat0_fp32 + mat1_fp32 + mat2_fp32),
|
| 208 |
+
]
|
| 209 |
+
self.nn_fp32 = [
|
| 210 |
+
("softplus", pointwise0_fp16),
|
| 211 |
+
("nll_loss", (torch.rand((n, n), device=dev, dtype=torch.float),
|
| 212 |
+
torch.zeros((n,), device=dev, dtype=torch.long))),
|
| 213 |
+
("nll_loss2d", (torch.rand((n, n, n, n), device=dev, dtype=torch.half),
|
| 214 |
+
torch.zeros((n, n, n), device=dev, dtype=torch.long))),
|
| 215 |
+
("l1_loss", mat0_fp16 + mat1_fp16),
|
| 216 |
+
("smooth_l1_loss", mat0_fp16 + mat1_fp16),
|
| 217 |
+
("mse_loss", mat0_fp16 + mat1_fp16),
|
| 218 |
+
("multilabel_margin_loss", mat0_fp16 + (torch.ones((n, n), device=dev, dtype=torch.long),)),
|
| 219 |
+
("soft_margin_loss", mat0_fp16 + (torch.ones((n, n), device=dev, dtype=torch.long),)),
|
| 220 |
+
("multi_margin_loss", mat0_fp16 + (torch.ones((n,), device=dev, dtype=torch.long),)),
|
| 221 |
+
]
|
| 222 |
+
self.linalg_fp16 = [
|
| 223 |
+
("linalg_vecdot", mat0_fp32 + mat0_fp32),
|
| 224 |
+
("linalg_multi_dot", (mat0_fp32 + mat1_fp32 + mat2_fp32,)),
|
| 225 |
+
]
|
| 226 |
+
self.methods_fp16 = [
|
| 227 |
+
("__matmul__", mat0_fp32 + mat1_fp32)
|
| 228 |
+
]
|
| 229 |
+
self.methods_fp32 = [
|
| 230 |
+
("__pow__", (torch.rand(n, device=dev, dtype=torch.float16), 1.5)),
|
| 231 |
+
]
|
| 232 |
+
self.banned = [
|
| 233 |
+
("binary_cross_entropy", (torch.rand((n, n), device=dev, dtype=torch.float32),
|
| 234 |
+
torch.rand((n, n), device=dev, dtype=torch.float32)), torch._C._nn),
|
| 235 |
+
]
|
| 236 |
+
|
| 237 |
+
class AutocastCPUTestLists:
    # Supplies ops and arguments for test_autocast_* in test/test_cpu.py
    def __init__(self, dev):
        super().__init__()
        n = 8

        def one(*shape, dtype):
            # One-element tuple holding a fresh random tensor; this is the
            # common argument shape used throughout these lists.
            return (torch.randn(shape, dtype=dtype, device=dev),)

        def pair(shape, dtype):
            # Two fresh random tensors of the same shape, as a tuple.
            return (torch.randn(shape, dtype=dtype, device=dev),
                    torch.randn(shape, dtype=dtype, device=dev))

        # Utility arguments, created as one-element tuples.
        pointwise0_bf16 = one(n, dtype=torch.bfloat16)
        pointwise1_bf16 = one(n, dtype=torch.bfloat16)
        pointwise2_bf16 = one(n, dtype=torch.bfloat16)
        mat0_bf16 = one(n, n, dtype=torch.bfloat16)
        mat1_bf16 = one(n, n, dtype=torch.bfloat16)
        mat2_bf16 = one(n, n, dtype=torch.bfloat16)

        pointwise0_fp16 = one(n, dtype=torch.float16)
        pointwise1_fp16 = one(n, dtype=torch.float16)

        dummy_dimsets = ((n,), (n, n), (n, n, n), (n, n, n, n), (n, n, n, n, n))

        dummy_bf16 = [one(*dimset, dtype=torch.bfloat16) for dimset in dummy_dimsets]

        dimsets = ((n, n, n), (n, n, n, n), (n, n, n, n, n))
        conv_args_bf16 = [pair(dimset, torch.bfloat16) for dimset in dimsets]
        conv_args_fp32 = [pair(dimset, torch.float32) for dimset in dimsets]

        bias_fp32 = one(n, dtype=torch.float32)
        element0_fp32 = one(1, dtype=torch.float32)
        pointwise0_fp32 = one(n, dtype=torch.float32)
        pointwise1_fp32 = one(n, dtype=torch.float32)
        mat0_fp32 = one(n, n, dtype=torch.float32)
        mat1_fp32 = one(n, n, dtype=torch.float32)
        mat2_fp32 = one(n, n, dtype=torch.float32)
        mat3_fp32 = one(n, n, dtype=torch.float32)

        dummy_fp32 = [one(*dimset, dtype=torch.float32) for dimset in dummy_dimsets]
        # The lists below organize ops that autocast needs to test.
        # self.list_name corresponds to test_autocast_list_name in test/test_cpu.py.
        # Each op is associated with a tuple of valid arguments.

        # Some ops implement built-in type promotion. These don't need autocasting,
        # but autocasting relies on their promotion, so we include tests to double-check.
        bf16_mixed = pointwise0_fp32 + pointwise1_bf16
        fp16_mixed = pointwise0_fp32 + pointwise1_fp16
        promote_cases = (
            ("eq", torch.bool), ("ge", torch.bool), ("gt", torch.bool),
            ("le", torch.bool), ("lt", torch.bool), ("ne", torch.bool),
            ("add", torch.float32), ("div", torch.float32), ("mul", torch.float32),
        )
        self.torch_expect_builtin_promote = [
            (op, bf16_mixed, fp16_mixed, out_dtype) for op, out_dtype in promote_cases
        ]
        self.methods_expect_builtin_promote = [
            (f"__{op}__", bf16_mixed, fp16_mixed, out_dtype)
            for op, out_dtype in promote_cases
        ]
        # The remaining lists organize ops that autocast treats explicitly.
        self.torch_16 = [
            ("conv1d", conv_args_fp32[0]),
            ("conv2d", conv_args_fp32[1]),
            ("conv3d", conv_args_fp32[2]),
            ("bmm", pair((n, n, n), torch.float32)),
            ("mm", mat0_fp32 + mat1_fp32),
            ("matmul", mat0_fp32 + mat1_fp32),
            ("baddbmm", (torch.randn((n, n, n), device=dev, dtype=torch.float32),
                         torch.randn((n, n, n), device=dev, dtype=torch.float32),
                         torch.randn((n, n, n), device=dev, dtype=torch.float32))),
            ("addmm", mat1_fp32 + mat2_fp32 + mat3_fp32),
            ("addbmm", mat0_fp32 + pair((n, n, n), torch.float32)),
            ("conv_tbc", (torch.randn((10, 7, 3), device=dev, dtype=torch.float32),
                          torch.randn((5, 3, 5), device=dev, dtype=torch.float32),
                          torch.randn(5, device=dev, dtype=torch.float32),
                          0)),
            ("conv_transpose1d", conv_args_fp32[0]),
            ("conv_transpose2d", conv_args_fp32[1]),
            ("conv_transpose3d", conv_args_fp32[2]),
            ("prelu", pointwise0_fp32 + element0_fp32),
            ("_native_multi_head_attention", (torch.randn((n, n, n), device=dev, dtype=torch.float32),
                                              torch.randn((n, n, n), device=dev, dtype=torch.float32),
                                              torch.randn((n, n, n), device=dev, dtype=torch.float32),
                                              n, 4, torch.randn((3 * n, n), device=dev, dtype=torch.float32),
                                              torch.randn((3 * n), device=dev, dtype=torch.float32),
                                              torch.randn((n, n), device=dev, dtype=torch.float32),
                                              torch.randn((n), device=dev, dtype=torch.float32))),
        ]
        self.torch_fp32 = [
            ("poisson_nll_loss", mat0_bf16 + mat1_bf16 + (True, False, 1.e-8, torch.nn._reduction.get_enum('mean'))),
            ("cosine_embedding_loss", (torch.tensor([[1, 2, 3]], device=dev, dtype=torch.bfloat16),
                                       torch.tensor([[1, 3, 4]], device=dev, dtype=torch.bfloat16),
                                       torch.tensor([1], device=dev, dtype=torch.int))),
            ("hinge_embedding_loss", mat0_bf16 + (torch.ones(n, device=dev, dtype=torch.int),)),
            ("margin_ranking_loss", mat0_bf16 + mat1_bf16 + (torch.ones((n,), device=dev, dtype=torch.bfloat16),)),
            ("triplet_margin_loss", mat0_bf16 + mat1_bf16 + mat2_bf16),
            ("binary_cross_entropy_with_logits", mat0_bf16 + (torch.rand((n, n), device=dev, dtype=torch.bfloat16),)),
        ]
        self.nn_16 = [
            ("linear", mat0_fp32 + mat1_fp32, {}),
        ]
        self.nn_fp32 = [
            ("avg_pool3d", dummy_bf16[3], {"kernel_size": (3, 3, 3), "stride": (1, 1, 1)}),
            ("binary_cross_entropy", (torch.rand((n, n), device=dev, dtype=torch.bfloat16),) +
                                     (torch.rand((n, n), device=dev, dtype=torch.bfloat16),)),
            ("reflection_pad1d", dummy_bf16[2], {"padding": (3, 3)}),
            ("nll_loss", (torch.rand((n, n), device=dev, dtype=torch.bfloat16),
                          torch.zeros((n,), device=dev, dtype=torch.long))),
            ("nll_loss2d", (torch.rand((n, n, n, n), device=dev, dtype=torch.bfloat16),
                            torch.zeros((n, n, n), device=dev, dtype=torch.long))),
            ("l1_loss", mat0_bf16 + mat1_bf16),
            ("smooth_l1_loss", mat0_bf16 + mat1_bf16),
            ("mse_loss", mat0_bf16 + mat1_bf16),
            ("multilabel_margin_loss", mat0_bf16 + (torch.ones((n, n), device=dev, dtype=torch.long),)),
            ("soft_margin_loss", mat0_bf16 + (torch.ones((n, n), device=dev, dtype=torch.long),)),
            ("multi_margin_loss", mat0_bf16 + (torch.ones((n,), device=dev, dtype=torch.long),)),
            ("huber_loss", mat0_bf16 + mat1_bf16),
        ]
        self.torch_need_autocast_promote = [
            ("cat", (pointwise0_bf16 + pointwise1_fp32,), (pointwise0_fp16 + pointwise1_fp32,)),
            ("stack", (pointwise0_bf16 + pointwise1_fp32,), (pointwise0_fp16 + pointwise1_fp32,)),
        ]
|
valley/lib/python3.10/site-packages/torch/testing/_internal/autograd_function_db.py
ADDED
|
@@ -0,0 +1,635 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: ignore-errors
|
| 2 |
+
|
| 3 |
+
import torch
|
| 4 |
+
from functools import partial
|
| 5 |
+
from torch.testing import make_tensor
|
| 6 |
+
from torch.testing._internal.opinfo.core import (
|
| 7 |
+
OpInfo,
|
| 8 |
+
SampleInput,
|
| 9 |
+
)
|
| 10 |
+
from torch.testing._internal.common_dtype import all_types_and
|
| 11 |
+
import numpy as np
|
| 12 |
+
|
| 13 |
+
# Note: [autograd.Function db]
|
| 14 |
+
#
|
| 15 |
+
# This is a collection of autograd.Function test cases written as OpInfos
|
| 16 |
+
# so they can easily be consumed by OpInfo-based tests to check if a subsystem
|
| 17 |
+
# supports autograd.Function.
|
| 18 |
+
#
|
| 19 |
+
# Axes:
|
| 20 |
+
# - saves {output, input, intermediate, non-tensor}
|
| 21 |
+
# - {inputs, output} x {single tensor, tensors, arbitrary objects}
|
| 22 |
+
# - Uses {mark_dirty, mark_non_differentiable, once_differentiable}
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def to_numpy(tensor):
    """Copy *tensor* to the host (if necessary) and return it as a NumPy array."""
    return tensor.cpu().numpy()
| 27 |
+
|
| 28 |
+
|
| 29 |
+
class NumpyCube(torch.autograd.Function):
    """Return (input**3, 3 * input**2), computed by round-tripping through NumPy.

    The second output is the derivative of the first and is also saved for
    backward/jvp.
    """

    @staticmethod
    def forward(input):
        arr = input.cpu().numpy()  # inlined to_numpy
        dinput = torch.tensor(3 * arr ** 2, device=input.device)
        return torch.tensor(arr ** 3, device=input.device), dinput

    @staticmethod
    def setup_context(ctx, inputs, output):
        # Save the input and the derivative output for both reverse-mode and
        # forward-mode AD.
        ctx.save_for_backward(inputs[0], output[1])
        ctx.save_for_forward(inputs[0], output[1])

    @staticmethod
    def backward(ctx, grad_output, grad_saved):
        input, dinput = ctx.saved_tensors
        # VJP of (x**3, 3x**2): g0 * 3x**2 + g1 * 6x.
        return NumpyMul.apply(grad_output, dinput) + 6 * NumpyMul.apply(grad_saved, input)

    @staticmethod
    def vmap(info, in_dims, input):
        # Elementwise op: the batch dim passes straight through to both outputs.
        result = NumpyCube.apply(input)
        return result, (in_dims[0], in_dims[0])

    @staticmethod
    def jvp(ctx, input_tangent):
        input, dinput = ctx.saved_tensors
        return NumpyMul.apply(input_tangent, dinput), 6 * NumpyMul.apply(input_tangent, input)
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
class CubeGenVmap(torch.autograd.Function):
    """Return (x**3, 3 * x**2) using plain torch ops.

    Relies on ``generate_vmap_rule`` for vmap support instead of a hand-written
    ``vmap`` staticmethod.
    """
    generate_vmap_rule = True

    @staticmethod
    def forward(x):
        # Second output is the derivative of the first; it is saved in
        # setup_context for backward/jvp.
        return x ** 3, 3 * x ** 2

    @staticmethod
    def setup_context(ctx, inputs, outputs):
        ctx.save_for_backward(inputs[0], outputs[1])
        ctx.save_for_forward(inputs[0], outputs[1])

    @staticmethod
    def backward(ctx, grad_output, grad_saved):
        input, dinput = ctx.saved_tensors
        # VJP of (x**3, 3x**2) is g0 * 3x**2 + g1 * 6x (cf. NumpyCube.backward).
        # Bug fix: the previous version ignored grad_saved and added `6 * dinput`,
        # which is not the adjoint of the second output.
        result = grad_output * dinput + 6 * grad_saved * input
        return result

    @staticmethod
    def jvp(ctx, input_tangent):
        input, dinput = ctx.saved_tensors
        # NOTE(review): mixing NumpyMul into a generate_vmap_rule class looks
        # unintended (it defeats vmap composition) — kept as-is; confirm.
        return MulGenVmap.apply(input_tangent, dinput), 6 * NumpyMul.apply(input_tangent, input)
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
def sample_inputs_numpy_cube(opinfo, device, dtype, requires_grad, **kwargs):
|
| 82 |
+
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
|
| 83 |
+
yield SampleInput(make_arg(1, low=0.8, high=2), args=())
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
class NumpyCubeNotComposable(torch.autograd.Function):
    """Like NumpyCube, but stashes a raw ndarray on ctx, which makes it
    non-composable with functorch transforms (hence the name)."""

    @staticmethod
    def forward(input):
        input_np = input.cpu().numpy()  # inlined to_numpy
        return torch.tensor(input_np ** 3, device=input.device), input_np

    @staticmethod
    def setup_context(ctx, inputs, output):
        _, input_np = output
        # Non-tensor state goes directly on ctx (not via save_for_backward).
        ctx.input_np = input_np
        ctx.device = inputs[0].device

    @staticmethod
    @torch.autograd.function.once_differentiable
    def backward(ctx, grad_output, grad_saved):
        result_np = 3 * (ctx.input_np ** 2)
        return torch.tensor(result_np, device=ctx.device)
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
class NumpyMul(torch.autograd.Function):
    """Elementwise multiply computed by round-tripping through NumPy."""

    @staticmethod
    def forward(x, y):
        return torch.tensor(x.cpu().numpy() * y.cpu().numpy(), device=x.device)

    @staticmethod
    def setup_context(ctx, inputs, output):
        ctx.save_for_backward(*inputs)
        ctx.save_for_forward(*inputs)

    @staticmethod
    def backward(ctx, grad_output):
        x, y = ctx.saved_tensors
        # Only materialize the gradients that are actually required.
        gx = NumpyMul.apply(grad_output, y) if ctx.needs_input_grad[0] else None
        gy = NumpyMul.apply(grad_output, x) if ctx.needs_input_grad[1] else None
        return gx, gy

    @staticmethod
    def vmap(info, in_dims, x, y):
        x_bdim, y_bdim = in_dims
        # Move each batch dim to the trailing position (or add a size-1 dim
        # so broadcasting lines up), multiply, then put the batch dim at 0.
        x = x.movedim(x_bdim, -1) if x_bdim is not None else x.unsqueeze(-1)
        y = y.movedim(y_bdim, -1) if y_bdim is not None else y.unsqueeze(-1)
        return NumpyMul.apply(x, y).movedim(-1, 0), 0

    @staticmethod
    def jvp(ctx, x_tangent, y_tangent):
        x, y = ctx.saved_tensors
        return x_tangent * y + y_tangent * x
|
| 139 |
+
|
| 140 |
+
def sample_inputs_numpy_mul(opinfo, device, dtype, requires_grad, **kwargs):
|
| 141 |
+
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
|
| 142 |
+
# Broadcasting
|
| 143 |
+
yield SampleInput(make_arg(4, low=0.9, high=2), args=(make_arg(3, 4, low=0.9, high=2),))
|
| 144 |
+
|
| 145 |
+
def sample_inputs_numpy_mul_scalar(opinfo, device, dtype, requires_grad, **kwargs):
|
| 146 |
+
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
|
| 147 |
+
yield SampleInput(make_arg(4, low=0.9, high=2), args=(), kwargs={"scalar": 3.14})
|
| 148 |
+
|
| 149 |
+
class MulGenVmap(torch.autograd.Function):
    """Elementwise multiply with plain torch ops; vmap support comes from
    generate_vmap_rule instead of a hand-written vmap staticmethod."""
    generate_vmap_rule = True

    @staticmethod
    def forward(x, y):
        return x * y

    @staticmethod
    def setup_context(ctx, inputs, outputs):
        ctx.save_for_backward(*inputs)
        ctx.save_for_forward(*inputs)

    @staticmethod
    def backward(ctx, grad_output):
        x, y = ctx.saved_tensors
        # Only materialize the gradients that are actually required.
        gx = MulGenVmap.apply(grad_output, y) if ctx.needs_input_grad[0] else None
        gy = MulGenVmap.apply(grad_output, x) if ctx.needs_input_grad[1] else None
        return gx, gy

    @staticmethod
    def jvp(ctx, x_tangent, y_tangent):
        x, y = ctx.saved_tensors
        return x_tangent * y + y_tangent * x
|
| 176 |
+
|
| 177 |
+
|
| 178 |
+
class NumpyExp_(torch.autograd.Function):
    """In-place exp computed through NumPy (trailing underscore = in-place)."""

    @staticmethod
    def forward(x):
        x_np = x.cpu().numpy()  # inlined to_numpy; shares storage when x is on CPU
        np.exp(x_np, x_np)      # in-place exp on the shared buffer
        return x

    @staticmethod
    def setup_context(ctx, inputs, output):
        x, = inputs
        ctx.mark_dirty(x)  # x was mutated in place
        ctx.save_for_backward(output)
        ctx.save_for_forward(output)

    @staticmethod
    def backward(ctx, grad_output):
        output, = ctx.saved_tensors
        return NumpyMul.apply(grad_output, output)

    @staticmethod
    def vmap(info, in_dims, x):
        # In-place op: mutate x and report its batch dim unchanged.
        NumpyExp_.apply(x)
        return x, in_dims[0]

    @staticmethod
    def jvp(ctx, x_tangent):
        # Doesn't call numpy operations because I didn't want to write NumpyMul_
        output, = ctx.saved_tensors
        x_tangent.mul_(output)
        return x_tangent
|
| 208 |
+
|
| 209 |
+
class NumpySort(torch.autograd.Function):
    """Sort ``x`` along ``dim`` via NumPy.

    Returns ``(sorted_x, ind, ind_inv)`` where ``ind`` is the permutation that
    sorts ``x`` and ``ind_inv`` undoes it. Both index outputs are marked
    non-differentiable.
    """

    @staticmethod
    def forward(x, dim):
        device = x.device
        x = x.cpu().numpy()  # inlined to_numpy
        ind = np.argsort(x, axis=dim)
        ind_inv = np.argsort(ind, axis=dim)
        result = np.take_along_axis(x, ind, axis=dim)
        # Bug fix: this previously returned the *unsorted* input (`x`) and left
        # `result` unused. backward/jvp permute gradients with ind/ind_inv,
        # which is only the correct adjoint for the sorted output.
        return (
            torch.tensor(result, device=device),
            torch.tensor(ind, device=device),
            torch.tensor(ind_inv, device=device),
        )

    @staticmethod
    def setup_context(ctx, inputs, output):
        x, dim = inputs
        _, ind, ind_inv = output
        ctx.mark_non_differentiable(ind, ind_inv)
        ctx.save_for_backward(ind, ind_inv)
        ctx.save_for_forward(ind, ind_inv)
        ctx.dim = dim

    @staticmethod
    def backward(ctx, grad_output, _0, _1):
        ind, ind_inv = ctx.saved_tensors
        # Gathering with the inverse permutation scatters the gradient back
        # to the pre-sort positions.
        return NumpyTake.apply(grad_output, ind_inv, ind, ctx.dim), None

    @staticmethod
    def vmap(info, in_dims, x, dim):
        x_bdim, _ = in_dims
        x = x.movedim(x_bdim, 0)
        # wrap dim (dim is logical, i.e. excludes the batch dim now at 0)
        dim = dim if dim >= 0 else dim + x.dim() - 1
        return NumpySort.apply(x, dim + 1), (0, 0, 0)

    @staticmethod
    def jvp(ctx, x_tangent, _):
        ind, ind_inv = ctx.saved_tensors
        return NumpyTake.apply(x_tangent, ind, ind_inv, ctx.dim), None, None
|
| 249 |
+
|
| 250 |
+
class SortGenVmap(torch.autograd.Function):
    """Sort along ``dim`` using torch ops; vmap support via generate_vmap_rule.

    Returns ``(sorted_x, ind, ind_inv)``, mirroring NumpySort.
    """
    generate_vmap_rule = True

    @staticmethod
    def forward(x, dim):
        device = x.device  # kept for parity with NumpySort (unused here)
        ind = torch.argsort(x, dim=dim)
        ind_inv = torch.argsort(ind, axis=dim)
        result = torch.take_along_dim(x, ind, dim=dim)
        return result, ind, ind_inv

    @staticmethod
    def setup_context(ctx, inputs, outputs):
        x, dim = inputs
        _, ind, ind_inv = outputs
        ctx.mark_non_differentiable(ind, ind_inv)
        ctx.save_for_backward(ind, ind_inv)
        ctx.save_for_forward(ind, ind_inv)
        ctx.dim = dim

    @staticmethod
    def backward(ctx, grad_output, _0, _1):
        ind, ind_inv = ctx.saved_tensors
        # Gather with the inverse permutation to route the gradient back.
        return TakeGenVmap.apply(grad_output, ind_inv, ind, ctx.dim), None

    @staticmethod
    def jvp(ctx, x_tangent, _):
        ind, ind_inv = ctx.saved_tensors
        return TakeGenVmap.apply(x_tangent, ind, ind_inv, ctx.dim), None, None
|
| 279 |
+
|
| 280 |
+
|
| 281 |
+
def sample_inputs_numpy_sort(opinfo, device, dtype, requires_grad, **kwargs):
|
| 282 |
+
make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
|
| 283 |
+
yield SampleInput(make_arg(3, 5), args=(1,))
|
| 284 |
+
|
| 285 |
+
|
| 286 |
+
def sample_inputs_numpy_take(opinfo, device, dtype, requires_grad, **kwargs):
    """Yield one sample whose ind/ind_inv come from an actual NumpySort run."""
    tensor = make_tensor(3, 5, device=device, dtype=dtype, requires_grad=requires_grad)
    dim = 1
    _, ind, ind_inv = NumpySort.apply(tensor, dim)
    yield SampleInput(tensor, args=(ind, ind_inv, dim))
|
| 292 |
+
|
| 293 |
+
|
| 294 |
+
class NumpyTake(torch.autograd.Function):
    """``np.take_along_axis`` as an autograd.Function.

    ``ind_inv`` is carried along purely so backward can invert the gather by
    swapping the two index tensors.
    """

    @staticmethod
    def forward(x, ind, ind_inv, dim):
        device = x.device
        x_np = x.cpu().numpy()      # inlined to_numpy
        ind_np = ind.cpu().numpy()
        return torch.tensor(np.take_along_axis(x_np, ind_np, dim), device=device)

    @staticmethod
    def setup_context(ctx, inputs, output):
        x, ind, ind_inv, dim = inputs
        ctx.save_for_backward(ind, ind_inv)
        ctx.save_for_forward(ind, ind_inv)
        ctx.dim = dim

    @staticmethod
    def backward(ctx, grad_output):
        ind, ind_inv = ctx.saved_tensors
        # Taking with the inverse permutation undoes the forward gather.
        return NumpyTake.apply(grad_output, ind_inv, ind, ctx.dim), None, None, None

    @staticmethod
    def vmap(info, in_dims, x, ind, ind_inv, dim):
        x_bdim, ind_bdim, ind_inv_bdim, _ = in_dims

        # wrap dim
        # NOTE(review): `x_bdim - 1` looks suspicious as a logical-rank count
        # (one would expect `x.dim() - 1` when batched); kept as-is to
        # preserve behavior — confirm against upstream.
        logical_dim = x.dim() if x_bdim is None else x_bdim - 1
        dim = dim if dim >= 0 else dim + logical_dim

        def ensure_batched(t, bdim):
            # Give every argument a batch dim at position 0.
            if bdim is None:
                return t.expand(info.batch_size, *t.shape)
            return t.movedim(bdim, 0)

        x = ensure_batched(x, x_bdim)
        ind = ensure_batched(ind, ind_bdim)
        ind_inv = ensure_batched(ind_inv, ind_inv_bdim)

        return NumpyTake.apply(x, ind, ind_inv, dim + 1), 0

    @staticmethod
    def jvp(ctx, x_tangent, ind_tangent, ind_inv_tangent, _):
        assert ind_tangent is None
        assert ind_inv_tangent is None
        ind, ind_inv = ctx.saved_tensors
        return NumpyTake.apply(x_tangent, ind, ind_inv, ctx.dim)
|
| 340 |
+
|
| 341 |
+
class TakeGenVmap(torch.autograd.Function):
    """Like NumpyTake but implemented with torch ops and relying on the
    auto-generated vmap rule (``generate_vmap_rule = True``)."""

    generate_vmap_rule = True

    @staticmethod
    def forward(x, ind, ind_inv, dim):
        return torch.take_along_dim(x, ind, dim)

    @staticmethod
    def setup_context(ctx, inputs, outputs):
        x, ind, ind_inv, dim = inputs
        # Needed by both backward and jvp.
        ctx.save_for_backward(ind, ind_inv)
        ctx.save_for_forward(ind, ind_inv)
        ctx.dim = dim

    @staticmethod
    def backward(ctx, grad_output):
        ind, ind_inv = ctx.saved_tensors
        # Gather along the inverse indices to route the gradient back.
        result = TakeGenVmap.apply(grad_output, ind_inv, ind, ctx.dim)
        return result, None, None, None

    @staticmethod
    def jvp(ctx, x_tangent, ind_tangent, ind_inv_tangent, _):
        ind, ind_inv = ctx.saved_tensors
        return TakeGenVmap.apply(x_tangent, ind, ind_inv, ctx.dim)
|
| 365 |
+
|
| 366 |
+
class Select(torch.autograd.Function):
    """Test fixture: ``x[idx]`` with a non-tensor index argument and a
    hand-written vmap rule."""

    @staticmethod
    def forward(x, idx):
        return x[idx]

    @staticmethod
    def setup_context(ctx, inputs, output):
        x, idx = inputs
        # Shape is needed to scatter the gradient; idx is a plain int.
        ctx.x_shape = x.shape
        ctx.idx = idx

    @staticmethod
    def backward(ctx, grad_output):
        # Scatter grad_output back into a zero tensor of the input's shape.
        result = grad_output.new_zeros(ctx.x_shape)
        result[ctx.idx] = grad_output
        return result, None

    @staticmethod
    def vmap(info, in_dims, x, idx):
        x_bdim, _ = in_dims
        # Move the batch dim out of position 0 so that indexing with ``idx``
        # still selects along the original logical dim 0.
        x = x.movedim(x_bdim, 1)
        return Select.apply(x, idx), 0

    @staticmethod
    def jvp(ctx, x_tangent, _):
        return Select.apply(x_tangent, ctx.idx)
|
| 392 |
+
|
| 393 |
+
class SelectGenVmap(torch.autograd.Function):
    """Same contract as Select, but using the auto-generated vmap rule."""

    generate_vmap_rule = True

    @staticmethod
    def forward(x, idx):
        return x[idx]

    @staticmethod
    def setup_context(ctx, inputs, outputs):
        x, idx = inputs
        # Shape is needed to scatter the gradient; idx is a plain int.
        ctx.x_shape = x.shape
        ctx.idx = idx

    @staticmethod
    def backward(ctx, grad_output):
        # Scatter grad_output back into a zero tensor of the input's shape.
        result = grad_output.new_zeros(ctx.x_shape)
        result[ctx.idx] = grad_output
        return result, None

    @staticmethod
    def jvp(ctx, x_tangent, _):
        return SelectGenVmap.apply(x_tangent, ctx.idx)
|
| 415 |
+
|
| 416 |
+
|
| 417 |
+
def sample_inputs_select(opinfo, device, dtype, requires_grad, **kwargs):
    """Yield the single sample used by the Select autograd Functions."""
    tensor_factory = partial(
        make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    yield SampleInput(tensor_factory(3, 5), args=(2,))
|
| 420 |
+
|
| 421 |
+
class ScaleGradGenVmap(torch.autograd.Function):
    """Identity in the forward pass; multiplies incoming gradients by a fixed
    constant in both the backward and JVP passes.  Uses the auto-generated
    vmap rule (``generate_vmap_rule = True``)."""

    generate_vmap_rule = True
    scale = 3.14

    @staticmethod
    def forward(x):
        # Forward is the identity, modulo returning a fresh tensor.
        out = x.clone()
        return out

    @staticmethod
    def setup_context(ctx, inputs, outputs):
        # Nothing to save: the scale factor lives on the class.
        pass

    @staticmethod
    def backward(ctx, grad_output):
        scaled = grad_output * ScaleGradGenVmap.scale
        return scaled

    @staticmethod
    def jvp(ctx, x_tangent):
        scaled = x_tangent * ScaleGradGenVmap.scale
        return scaled
|
| 440 |
+
|
| 441 |
+
class ZeroGradientsGenVmap(torch.autograd.Function):
    """Negative-test fixture: deliberately returns ill-formed (zero, and in
    backward's first slot, wrongly-shaped) gradients to exercise the
    framework's gradient-validation paths."""

    generate_vmap_rule = True

    @staticmethod
    def forward(x, y):
        return x.clone(), y.clone()

    @staticmethod
    def setup_context(ctx, inputs, outputs):
        pass

    @staticmethod
    def backward(ctx, gx, gy):
        # Intentionally returning torch.zeros instead of zeros_like or new_zeros.
        # Also intentionally not None.
        return (
            # Intentionally too-large gradient
            torch.zeros(3, 4, *gx.shape, dtype=gx.dtype, device=gx.device),
            torch.zeros(gy.shape, dtype=gy.dtype, device=gy.device),
        )

    @staticmethod
    def jvp(ctx, gx, gy):
        # Intentionally returning torch.zeros instead of zeros_like or new_zeros.
        # Also intentionally not None.
        return (
            torch.zeros(gx.shape, dtype=gx.dtype, device=gx.device),
            torch.zeros(gy.shape, dtype=gy.dtype, device=gy.device),
        )
|
| 470 |
+
|
| 471 |
+
|
| 472 |
+
def sample_inputs_forward_default_args(opinfo, device, dtype, requires_grad, **kwargs):
    """Yield one positional-only sample so the Function's default ``idx`` is used."""
    tensor_factory = partial(
        make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    yield SampleInput(tensor_factory(3, 5))
|
| 475 |
+
|
| 476 |
+
|
| 477 |
+
class ForwardHasDefaultArgs(torch.autograd.Function):
    """Test fixture: identical contract to Select, except ``forward`` has a
    default value for ``idx`` — exercises default-argument handling."""

    @staticmethod
    def forward(x, idx=(2,)):
        return x[idx]

    @staticmethod
    def setup_context(ctx, inputs, output):
        x, idx = inputs
        # Shape is needed to scatter the gradient back in backward().
        ctx.x_shape = x.shape
        ctx.idx = idx

    @staticmethod
    def backward(ctx, grad_output):
        # Scatter grad_output into a zero tensor of the input's shape.
        result = grad_output.new_zeros(ctx.x_shape)
        result[ctx.idx] = grad_output
        return result, None

    @staticmethod
    def vmap(info, in_dims, x, idx):
        x_bdim, _ = in_dims
        # Move the batch dim aside so ``idx`` still indexes logical dim 0.
        x = x.movedim(x_bdim, 1)
        return ForwardHasDefaultArgs.apply(x, idx), 0

    @staticmethod
    def jvp(ctx, x_tangent, _):
        return ForwardHasDefaultArgs.apply(x_tangent, ctx.idx)
|
| 503 |
+
|
| 504 |
+
|
| 505 |
+
# OpInfo database for the autograd.Function fixtures defined above; consumed
# by the OpInfo-driven autograd/functorch test suites.
autograd_function_db = [
    OpInfo(
        'NumpyCubeAutogradFunction',
        op=NumpyCube.apply,
        supports_forward_ad=True,
        supports_fwgrad_bwgrad=True,
        sample_inputs_func=sample_inputs_numpy_cube,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
    ),
    OpInfo(
        'NumpyExpMarkDirtyAutogradFunction',
        # Out-of-place variant clones first: NumpyExp_ mutates its input.
        op=lambda x: NumpyExp_.apply(x.clone()),
        inplace_variant=NumpyExp_.apply,
        supports_forward_ad=True,
        supports_fwgrad_bwgrad=True,
        sample_inputs_func=sample_inputs_numpy_cube,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
    ),
    OpInfo(
        'NumpyMulAutogradFunction',
        op=NumpyMul.apply,
        supports_forward_ad=True,
        supports_fwgrad_bwgrad=True,
        sample_inputs_func=sample_inputs_numpy_mul,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
    ),
    OpInfo(
        'NumpyCubeNotComposableAutogradFunction',
        op=lambda x: NumpyCubeNotComposable.apply(x)[0],
        supports_forward_ad=False,
        supports_fwgrad_bwgrad=False,
        sample_inputs_func=sample_inputs_numpy_cube,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
    ),
    OpInfo(
        'NumpySortAutogradFunction',
        op=NumpySort.apply,
        supports_forward_ad=False,
        supports_fwgrad_bwgrad=False,
        sample_inputs_func=sample_inputs_numpy_sort,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
        # Gradcheck only the sorted values, not the integer indices.
        gradcheck_wrapper=lambda y, ind: y,
    ),
    OpInfo(
        'NumpyTakeAutogradFunction',
        op=NumpyTake.apply,
        supports_forward_ad=False,
        supports_fwgrad_bwgrad=False,
        sample_inputs_func=sample_inputs_numpy_take,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
    ),
    OpInfo(
        'SelectAutogradFunction',
        op=Select.apply,
        supports_forward_ad=True,
        supports_fwgrad_bwgrad=True,
        sample_inputs_func=sample_inputs_select,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
    ),
    OpInfo(
        'CubeGenVmapAutogradFunction',
        op=CubeGenVmap.apply,
        supports_forward_ad=True,
        supports_fwgrad_bwgrad=True,
        sample_inputs_func=sample_inputs_numpy_cube,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
    ),
    OpInfo(
        'MulGenVmapAutogradFunction',
        op=MulGenVmap.apply,
        supports_forward_ad=True,
        supports_fwgrad_bwgrad=True,
        sample_inputs_func=sample_inputs_numpy_mul,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
    ),
    OpInfo(
        'SortGenVmapAutogradFunction',
        op=SortGenVmap.apply,
        supports_forward_ad=True,
        supports_fwgrad_bwgrad=True,
        sample_inputs_func=sample_inputs_numpy_sort,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
        # Gradcheck only the sorted values, not the integer indices.
        gradcheck_wrapper=lambda y, ind: y,
    ),
    OpInfo(
        'SelectGenVmapAutogradFunction',
        op=SelectGenVmap.apply,
        supports_forward_ad=True,
        supports_fwgrad_bwgrad=True,
        sample_inputs_func=sample_inputs_select,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
    ),
    OpInfo(
        'ScaleGradGenVmapAutogradFunction',
        op=ScaleGradGenVmap.apply,
        supports_forward_ad=True,
        supports_fwgrad_bwgrad=True,
        sample_inputs_func=sample_inputs_numpy_cube,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
    ),
    OpInfo(
        'ZeroGradientsGenVmapAutogradFunction',
        op=ZeroGradientsGenVmap.apply,
        supports_forward_ad=True,
        supports_fwgrad_bwgrad=True,
        sample_inputs_func=sample_inputs_numpy_mul,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
    ),
    OpInfo(
        'ForwardHasDefaultArgsAutogradFunction',
        op=ForwardHasDefaultArgs.apply,
        supports_forward_ad=True,
        supports_fwgrad_bwgrad=True,
        sample_inputs_func=sample_inputs_forward_default_args,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
    ),
]
|
valley/lib/python3.10/site-packages/torch/testing/_internal/common_cuda.py
ADDED
|
@@ -0,0 +1,281 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: ignore-errors
|
| 2 |
+
|
| 3 |
+
r"""This file is allowed to initialize CUDA context when imported."""
|
| 4 |
+
|
| 5 |
+
import functools
|
| 6 |
+
import torch
|
| 7 |
+
import torch.cuda
|
| 8 |
+
from torch.testing._internal.common_utils import LazyVal, TEST_NUMBA, TEST_WITH_ROCM, TEST_CUDA, IS_WINDOWS
|
| 9 |
+
import inspect
|
| 10 |
+
import contextlib
|
| 11 |
+
import os
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
# Whether CUDA was already initialized when this module was imported; checked
# again at the bottom of the file to guarantee the import itself stays lazy.
CUDA_ALREADY_INITIALIZED_ON_IMPORT = torch.cuda.is_initialized()


TEST_MULTIGPU = TEST_CUDA and torch.cuda.device_count() >= 2
CUDA_DEVICE = torch.device("cuda:0") if TEST_CUDA else None
# note: if ROCm is targeted, TEST_CUDNN is code for TEST_MIOPEN
if TEST_WITH_ROCM:
    TEST_CUDNN = LazyVal(lambda: TEST_CUDA)
else:
    TEST_CUDNN = LazyVal(lambda: TEST_CUDA and torch.backends.cudnn.is_acceptable(torch.tensor(1., device=CUDA_DEVICE)))

TEST_CUDNN_VERSION = LazyVal(lambda: torch.backends.cudnn.version() if TEST_CUDNN else 0)

# Compute-capability gates; LazyVal defers the CUDA query until first use.
SM53OrLater = LazyVal(lambda: torch.cuda.is_available() and torch.cuda.get_device_capability() >= (5, 3))
SM60OrLater = LazyVal(lambda: torch.cuda.is_available() and torch.cuda.get_device_capability() >= (6, 0))
SM70OrLater = LazyVal(lambda: torch.cuda.is_available() and torch.cuda.get_device_capability() >= (7, 0))
SM75OrLater = LazyVal(lambda: torch.cuda.is_available() and torch.cuda.get_device_capability() >= (7, 5))
SM80OrLater = LazyVal(lambda: torch.cuda.is_available() and torch.cuda.get_device_capability() >= (8, 0))
SM90OrLater = LazyVal(lambda: torch.cuda.is_available() and torch.cuda.get_device_capability() >= (9, 0))

# Jetson boards are identified by their exact compute capabilities.
IS_JETSON = LazyVal(lambda: torch.cuda.is_available() and torch.cuda.get_device_capability() in [(7, 2), (8, 7)])
|
| 35 |
+
|
| 36 |
+
def evaluate_gfx_arch_exact(matching_arch):
    """Return True iff the current GPU's gcnArchName (or its environment
    override) is exactly *matching_arch*; False when no GPU is available."""
    if not torch.cuda.is_available():
        return False
    detected_arch = torch.cuda.get_device_properties('cuda').gcnArchName
    effective_arch = os.environ.get(
        'PYTORCH_DEBUG_FLASH_ATTENTION_GCN_ARCH_OVERRIDE', detected_arch)
    return effective_arch == matching_arch
|
| 42 |
+
|
| 43 |
+
# Exact-architecture gates for the ROCm GPUs checked by the SDPA support tests.
GFX90A_Exact = LazyVal(lambda: evaluate_gfx_arch_exact('gfx90a:sramecc+:xnack-'))
GFX942_Exact = LazyVal(lambda: evaluate_gfx_arch_exact('gfx942:sramecc+:xnack-'))
|
| 45 |
+
|
| 46 |
+
def evaluate_platform_supports_flash_attention():
    """Flash attention: on ROCm, only specific gfx architectures; on CUDA,
    any non-Windows platform with SM80+.

    NOTE(review): the CUDA branch returns ``not IS_WINDOWS and SM80OrLater``,
    whose value can be the LazyVal object itself (short-circuit `and`), not a
    plain bool — callers wrap this function in LazyVal, so only truthiness
    matters.  Confirm before relying on the exact return type.
    """
    if TEST_WITH_ROCM:
        return evaluate_gfx_arch_exact('gfx90a:sramecc+:xnack-') or evaluate_gfx_arch_exact('gfx942:sramecc+:xnack-')
    if TEST_CUDA:
        return not IS_WINDOWS and SM80OrLater
    return False
|
| 52 |
+
|
| 53 |
+
def evaluate_platform_supports_efficient_attention():
    """Memory-efficient attention: on ROCm, only specific gfx architectures;
    on CUDA, always supported; otherwise unsupported."""
    if TEST_WITH_ROCM:
        return evaluate_gfx_arch_exact('gfx90a:sramecc+:xnack-') or evaluate_gfx_arch_exact('gfx942:sramecc+:xnack-')
    if TEST_CUDA:
        return True
    return False
|
| 59 |
+
|
| 60 |
+
# SDPA-backend capability flags.  Evaluated lazily so that importing this
# module does not touch the CUDA driver.
PLATFORM_SUPPORTS_FLASH_ATTENTION: bool = LazyVal(lambda: evaluate_platform_supports_flash_attention())
PLATFORM_SUPPORTS_MEM_EFF_ATTENTION: bool = LazyVal(lambda: evaluate_platform_supports_efficient_attention())
# TODO(eqy): gate this against a cuDNN version
PLATFORM_SUPPORTS_CUDNN_ATTENTION: bool = LazyVal(lambda: TEST_CUDA and not TEST_WITH_ROCM and
                                                  torch.backends.cuda.cudnn_sdp_enabled())
# This condition always evaluates to PLATFORM_SUPPORTS_MEM_EFF_ATTENTION but for logical clarity we keep it separate
PLATFORM_SUPPORTS_FUSED_ATTENTION: bool = LazyVal(lambda: PLATFORM_SUPPORTS_FLASH_ATTENTION or PLATFORM_SUPPORTS_MEM_EFF_ATTENTION)

# Eagerly evaluated: depends only on build flags, not on a live device.
PLATFORM_SUPPORTS_FUSED_SDPA: bool = TEST_CUDA and not TEST_WITH_ROCM

PLATFORM_SUPPORTS_BF16: bool = LazyVal(lambda: TEST_CUDA and SM80OrLater)
|
| 71 |
+
|
| 72 |
+
if TEST_NUMBA:
    try:
        import numba.cuda
        TEST_NUMBA_CUDA = numba.cuda.is_available()
    except Exception as e:
        # Any failure importing/probing numba.cuda disables both numba flags.
        TEST_NUMBA_CUDA = False
        TEST_NUMBA = False
else:
    TEST_NUMBA_CUDA = False

# Used below in `initialize_cuda_context_rng` to ensure that CUDA context and
# RNG have been initialized.
__cuda_ctx_rng_initialized = False
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
# after this call, CUDA context and RNG must have been initialized on each GPU
|
| 88 |
+
def initialize_cuda_context_rng():
    """Ensure the CUDA context and RNG are initialized on every visible GPU.

    Idempotent: the initialization work runs at most once per process.
    """
    global __cuda_ctx_rng_initialized
    assert TEST_CUDA, 'CUDA must be available when calling initialize_cuda_context_rng'
    if __cuda_ctx_rng_initialized:
        return
    # Allocating one random tensor per device forces both the context and the
    # RNG to initialize (the memory tests rely on this).
    for device_index in range(torch.cuda.device_count()):
        torch.randn(1, device=f"cuda:{device_index}")
    __cuda_ctx_rng_initialized = True
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
# Test whether hardware TF32 math mode enabled. It is enabled only on:
|
| 99 |
+
# - CUDA >= 11
|
| 100 |
+
# - arch >= Ampere
|
| 101 |
+
def tf32_is_not_fp32():
    """True when hardware TF32 can diverge from FP32 math: a CUDA (non-ROCm)
    build with CUDA >= 11 running on compute capability >= 8.0 (Ampere)."""
    if not torch.cuda.is_available() or torch.version.cuda is None:
        return False
    major_capability = torch.cuda.get_device_properties(torch.cuda.current_device()).major
    if major_capability < 8:
        return False
    cuda_major = int(torch.version.cuda.split('.')[0])
    return cuda_major >= 11
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
@contextlib.contextmanager
def tf32_off():
    """Context manager that disables TF32 for both matmul and cuDNN,
    restoring the previous matmul setting on exit."""
    saved_matmul_tf32 = torch.backends.cuda.matmul.allow_tf32
    try:
        torch.backends.cuda.matmul.allow_tf32 = False
        with torch.backends.cudnn.flags(enabled=None, benchmark=None,
                                        deterministic=None, allow_tf32=False):
            yield
    finally:
        torch.backends.cuda.matmul.allow_tf32 = saved_matmul_tf32
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
@contextlib.contextmanager
def tf32_on(self, tf32_precision=1e-5):
    """Enable TF32 (matmul + cuDNN) and relax ``self.precision`` to
    *tf32_precision* for the duration of the block; restore both on exit."""
    saved_matmul_tf32 = torch.backends.cuda.matmul.allow_tf32
    saved_precision = self.precision
    try:
        torch.backends.cuda.matmul.allow_tf32 = True
        self.precision = tf32_precision
        with torch.backends.cudnn.flags(enabled=None, benchmark=None,
                                        deterministic=None, allow_tf32=True):
            yield
    finally:
        torch.backends.cuda.matmul.allow_tf32 = saved_matmul_tf32
        self.precision = saved_precision
|
| 134 |
+
|
| 135 |
+
|
| 136 |
+
# This is a wrapper that wraps a test to run this test twice, one with
|
| 137 |
+
# allow_tf32=True, another with allow_tf32=False. When running with
|
| 138 |
+
# allow_tf32=True, it will use reduced precision as specified by the
|
| 139 |
+
# argument. For example:
|
| 140 |
+
# @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
|
| 141 |
+
# @tf32_on_and_off(0.005)
|
| 142 |
+
# def test_matmul(self, device, dtype):
|
| 143 |
+
# a = ...; b = ...;
|
| 144 |
+
# c = torch.matmul(a, b)
|
| 145 |
+
# self.assertEqual(c, expected)
|
| 146 |
+
# In the above example, when testing torch.float32 and torch.complex64 on CUDA
|
| 147 |
+
# on a CUDA >= 11 build on an >=Ampere architecture, the matmul will be running at
|
| 148 |
+
# TF32 mode and TF32 mode off, and on TF32 mode, the assertEqual will use reduced
|
| 149 |
+
# precision to check values.
|
| 150 |
+
#
|
| 151 |
+
# This decorator can be used for function with or without device/dtype, such as
|
| 152 |
+
# @tf32_on_and_off(0.005)
|
| 153 |
+
# def test_my_op(self)
|
| 154 |
+
# @tf32_on_and_off(0.005)
|
| 155 |
+
# def test_my_op(self, device)
|
| 156 |
+
# @tf32_on_and_off(0.005)
|
| 157 |
+
# def test_my_op(self, device, dtype)
|
| 158 |
+
# @tf32_on_and_off(0.005)
|
| 159 |
+
# def test_my_op(self, dtype)
|
| 160 |
+
# if neither device nor dtype is specified, it will check if the system has ampere device
|
| 161 |
+
# if device is specified, it will check if device is cuda
|
| 162 |
+
# if dtype is specified, it will check if dtype is float32 or complex64
|
| 163 |
+
# tf32 and fp32 are different only when all the three checks pass
|
| 164 |
+
def tf32_on_and_off(tf32_precision=1e-5):
    """Decorator factory: run the wrapped test twice — once with TF32 off and
    once with TF32 on (using *tf32_precision* as the comparison tolerance) —
    but only when TF32 can actually diverge from FP32 for the test's
    device/dtype; otherwise run the test once, unchanged."""
    def with_tf32_disabled(self, function_call):
        with tf32_off():
            function_call()

    def with_tf32_enabled(self, function_call):
        with tf32_on(self, tf32_precision):
            function_call()

    def wrapper(f):
        params = inspect.signature(f).parameters
        arg_names = tuple(params.keys())

        @functools.wraps(f)
        def wrapped(*args, **kwargs):
            # Fold positional args into kwargs so 'self'/'device'/'dtype' can
            # be looked up by name regardless of how the test was declared.
            for k, v in zip(arg_names, args):
                kwargs[k] = v
            cond = tf32_is_not_fp32()
            if 'device' in kwargs:
                cond = cond and (torch.device(kwargs['device']).type == 'cuda')
            if 'dtype' in kwargs:
                cond = cond and (kwargs['dtype'] in {torch.float32, torch.complex64})
            if cond:
                with_tf32_disabled(kwargs['self'], lambda: f(**kwargs))
                with_tf32_enabled(kwargs['self'], lambda: f(**kwargs))
            else:
                f(**kwargs)

        return wrapped
    return wrapper
|
| 194 |
+
|
| 195 |
+
|
| 196 |
+
# This is a wrapper that wraps a test to run it with TF32 turned off.
|
| 197 |
+
# This wrapper is designed to be used when a test uses matmul or convolutions
|
| 198 |
+
# but the purpose of that test is not testing matmul or convolutions.
|
| 199 |
+
# Disabling TF32 will enforce torch.float tensors to be always computed
|
| 200 |
+
# at full precision.
|
| 201 |
+
def with_tf32_off(f):
    """Decorator: run *f* entirely under ``tf32_off()`` (full FP32 precision)."""
    @functools.wraps(f)
    def inner(*args, **kwargs):
        with tf32_off():
            return f(*args, **kwargs)

    return inner
|
| 208 |
+
|
| 209 |
+
def _get_magma_version():
|
| 210 |
+
if 'Magma' not in torch.__config__.show():
|
| 211 |
+
return (0, 0)
|
| 212 |
+
position = torch.__config__.show().find('Magma ')
|
| 213 |
+
version_str = torch.__config__.show()[position + len('Magma '):].split('\n')[0]
|
| 214 |
+
return tuple(int(x) for x in version_str.split("."))
|
| 215 |
+
|
| 216 |
+
def _get_torch_cuda_version():
|
| 217 |
+
if torch.version.cuda is None:
|
| 218 |
+
return (0, 0)
|
| 219 |
+
cuda_version = str(torch.version.cuda)
|
| 220 |
+
return tuple(int(x) for x in cuda_version.split("."))
|
| 221 |
+
|
| 222 |
+
def _get_torch_rocm_version():
    """Return the ROCm version as a tuple of ints; ``(0, 0)`` off ROCm."""
    if not TEST_WITH_ROCM:
        return (0, 0)
    # torch.version.hip looks like "5.4.22803-474e8620"; drop the git sha.
    version_text = str(torch.version.hip).split("-")[0]
    return tuple(int(part) for part in version_text.split("."))
|
| 228 |
+
|
| 229 |
+
def _check_cusparse_generic_available():
    """cuSPARSE generic API: available on every non-ROCm build."""
    return not TEST_WITH_ROCM
|
| 231 |
+
|
| 232 |
+
def _check_hipsparse_generic_available():
    """hipSPARSE generic API: available on ROCm >= 5.1 builds only."""
    if not TEST_WITH_ROCM:
        return False

    # torch.version.hip looks like "5.4.22803-474e8620"; drop the git sha.
    rocm_version = str(torch.version.hip)
    rocm_version = rocm_version.split("-")[0]  # ignore git sha
    rocm_version_tuple = tuple(int(x) for x in rocm_version.split("."))
    # The original also guarded against rocm_version_tuple being None, but
    # tuple(...) can never be None — that branch was dead code.
    return rocm_version_tuple >= (5, 1)
|
| 240 |
+
|
| 241 |
+
|
| 242 |
+
# Evaluated eagerly: both checks depend only on build flags, not live devices.
TEST_CUSPARSE_GENERIC = _check_cusparse_generic_available()
TEST_HIPSPARSE_GENERIC = _check_hipsparse_generic_available()
|
| 244 |
+
|
| 245 |
+
# Shared by test_torch.py and test_multigpu.py
|
| 246 |
+
def _create_scaling_models_optimizers(device="cuda", optimizer_ctor=torch.optim.SGD, optimizer_kwargs=None):
|
| 247 |
+
# Create a module+optimizer that will use scaling, and a control module+optimizer
|
| 248 |
+
# that will not use scaling, against which the scaling-enabled module+optimizer can be compared.
|
| 249 |
+
mod_control = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.Linear(8, 8)).to(device=device)
|
| 250 |
+
mod_scaling = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.Linear(8, 8)).to(device=device)
|
| 251 |
+
with torch.no_grad():
|
| 252 |
+
for c, s in zip(mod_control.parameters(), mod_scaling.parameters()):
|
| 253 |
+
s.copy_(c)
|
| 254 |
+
|
| 255 |
+
kwargs = {"lr": 1.0}
|
| 256 |
+
if optimizer_kwargs is not None:
|
| 257 |
+
kwargs.update(optimizer_kwargs)
|
| 258 |
+
opt_control = optimizer_ctor(mod_control.parameters(), **kwargs)
|
| 259 |
+
opt_scaling = optimizer_ctor(mod_scaling.parameters(), **kwargs)
|
| 260 |
+
|
| 261 |
+
return mod_control, mod_scaling, opt_control, opt_scaling
|
| 262 |
+
|
| 263 |
+
# Shared by test_torch.py, test_cuda.py and test_multigpu.py
|
| 264 |
+
def _create_scaling_case(device="cuda", dtype=torch.float, optimizer_ctor=torch.optim.SGD, optimizer_kwargs=None):
    """Produce the standard grad-scaling fixture: the model/optimizer pairs
    from ``_create_scaling_models_optimizers`` plus four random (input, target)
    batches, a loss function, and the iteration at which a step is skipped.
    Shared by test_torch.py, test_cuda.py and test_multigpu.py."""
    def random_batch():
        return (torch.randn((8, 8), dtype=dtype, device=device),
                torch.randn((8, 8), dtype=dtype, device=device))

    # Data is generated first so RNG draw order matches the historical layout.
    data = [random_batch() for _ in range(4)]

    loss_fn = torch.nn.MSELoss().to(device)

    skip_iter = 2

    return _create_scaling_models_optimizers(
        device=device, optimizer_ctor=optimizer_ctor, optimizer_kwargs=optimizer_kwargs,
    ) + (data, loss_fn, skip_iter)
|
| 277 |
+
|
| 278 |
+
|
| 279 |
+
# Importing this module should NOT eagerly initialize CUDA
|
| 280 |
+
if not CUDA_ALREADY_INITIALIZED_ON_IMPORT:
    # If CUDA was uninitialized before this module was imported, importing it
    # must not have initialized CUDA as a side effect.
    assert not torch.cuda.is_initialized()
|
valley/lib/python3.10/site-packages/torch/testing/_internal/common_device_type.py
ADDED
|
@@ -0,0 +1,1587 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: ignore-errors
|
| 2 |
+
|
| 3 |
+
import copy
|
| 4 |
+
import gc
|
| 5 |
+
import inspect
|
| 6 |
+
import runpy
|
| 7 |
+
import sys
|
| 8 |
+
import threading
|
| 9 |
+
from collections import namedtuple
|
| 10 |
+
from enum import Enum
|
| 11 |
+
from functools import wraps, partial
|
| 12 |
+
from typing import List, Any, ClassVar, Optional, Sequence, Tuple, Union, Dict, Set
|
| 13 |
+
import unittest
|
| 14 |
+
import os
|
| 15 |
+
import torch
|
| 16 |
+
from torch.testing._internal.common_utils import TestCase, TEST_WITH_ROCM, TEST_MKL, \
|
| 17 |
+
skipCUDANonDefaultStreamIf, TEST_WITH_ASAN, TEST_WITH_UBSAN, TEST_WITH_TSAN, \
|
| 18 |
+
IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, IS_WINDOWS, TEST_MPS, TEST_XPU, \
|
| 19 |
+
_TestParametrizer, compose_parametrize_fns, dtype_name, \
|
| 20 |
+
TEST_WITH_MIOPEN_SUGGEST_NHWC, NATIVE_DEVICES, skipIfTorchDynamo, \
|
| 21 |
+
get_tracked_input, clear_tracked_input, PRINT_REPRO_ON_FAILURE, \
|
| 22 |
+
TEST_WITH_TORCHINDUCTOR
|
| 23 |
+
from torch.testing._internal.common_cuda import _get_torch_cuda_version, \
|
| 24 |
+
TEST_CUSPARSE_GENERIC, TEST_HIPSPARSE_GENERIC, _get_torch_rocm_version
|
| 25 |
+
from torch.testing._internal.common_dtype import get_all_dtypes
|
| 26 |
+
|
| 27 |
+
# Detect whether psutil is importable; some memory-observing tests need it.
HAS_PSUTIL = True
try:
    import psutil  # type: ignore[import]
except ImportError:
    HAS_PSUTIL = False
|
| 32 |
+
|
| 33 |
+
# Note [Writing Test Templates]
|
| 34 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
| 35 |
+
#
|
| 36 |
+
# This note was written shortly after the PyTorch 1.9 release.
|
| 37 |
+
# If you notice it's out-of-date or think it could be improved then please
|
| 38 |
+
# file an issue.
|
| 39 |
+
#
|
| 40 |
+
# PyTorch has its own framework for instantiating test templates. That is, for
|
| 41 |
+
# taking test classes that look similar to unittest or pytest
|
| 42 |
+
# compatible test classes and optionally doing the following:
|
| 43 |
+
#
|
| 44 |
+
# - instantiating a version of the test class for each available device type
|
| 45 |
+
# (often the CPU, CUDA, and META device types)
|
| 46 |
+
# - further instantiating a version of each test that's always specialized
|
| 47 |
+
# on the test class's device type, and optionally specialized further
|
| 48 |
+
# on datatypes or operators
|
| 49 |
+
#
|
| 50 |
+
# This functionality is similar to pytest's parametrize functionality
|
| 51 |
+
# (see https://docs.pytest.org/en/6.2.x/parametrize.html), but with considerable
|
| 52 |
+
# additional logic that specializes the instantiated test classes for their
|
| 53 |
+
# device types (see CPUTestBase and CUDATestBase below), supports a variety
|
| 54 |
+
# of composable decorators that allow for test filtering and setting
|
| 55 |
+
# tolerances, and allows tests parametrized by operators to instantiate
|
| 56 |
+
# only the subset of device type x dtype that operator supports.
|
| 57 |
+
#
|
| 58 |
+
# This framework was built to make it easier to write tests that run on
|
| 59 |
+
# multiple device types, multiple datatypes (dtypes), and for multiple
|
| 60 |
+
# operators. It's also useful for controlling which tests are run. For example,
|
| 61 |
+
# only tests that use a CUDA device can be run on platforms with CUDA.
|
| 62 |
+
# Let's dive in with an example to get an idea for how it works:
|
| 63 |
+
#
|
| 64 |
+
# --------------------------------------------------------
|
| 65 |
+
# A template class (looks like a regular unittest TestCase)
|
| 66 |
+
# class TestClassFoo(TestCase):
|
| 67 |
+
#
|
| 68 |
+
# # A template test that can be specialized with a device
|
| 69 |
+
# # NOTE: this test case is not runnable by unittest or pytest because it
|
| 70 |
+
# # accepts an extra positional argument, "device", that they do not understand
|
| 71 |
+
# def test_bar(self, device):
|
| 72 |
+
# pass
|
| 73 |
+
#
|
| 74 |
+
# # Function that instantiates a template class and its tests
|
| 75 |
+
# instantiate_device_type_tests(TestCommon, globals())
|
| 76 |
+
# --------------------------------------------------------
|
| 77 |
+
#
|
| 78 |
+
# In the above code example we see a template class and a single test template
|
| 79 |
+
# that can be instantiated with a device. The function
|
| 80 |
+
# instantiate_device_type_tests(), called at file scope, instantiates
|
| 81 |
+
# new test classes, one per available device type, and new tests in those
|
| 82 |
+
# classes from these templates. It actually does this by removing
|
| 83 |
+
# the class TestClassFoo and replacing it with classes like TestClassFooCPU
|
| 84 |
+
# and TestClassFooCUDA, instantiated test classes that inherit from CPUTestBase
|
| 85 |
+
# and CUDATestBase respectively. Additional device types, like XLA,
|
| 86 |
+
# (see https://github.com/pytorch/xla) can further extend the set of
|
| 87 |
+
# instantiated test classes to create classes like TestClassFooXLA.
|
| 88 |
+
#
|
| 89 |
+
# The test template, test_bar(), is also instantiated. In this case the template
|
| 90 |
+
# is only specialized on a device, so (depending on the available device
|
| 91 |
+
# types) it might become test_bar_cpu() in TestClassFooCPU and test_bar_cuda()
|
| 92 |
+
# in TestClassFooCUDA. We can think of the instantiated test classes as
|
| 93 |
+
# looking like this:
|
| 94 |
+
#
|
| 95 |
+
# --------------------------------------------------------
|
| 96 |
+
# # An instantiated test class for the CPU device type
|
| 97 |
+
# class TestClassFooCPU(CPUTestBase):
|
| 98 |
+
#
|
| 99 |
+
# # An instantiated test that calls the template with the string representation
|
| 100 |
+
# # of a device from the test class's device type
|
| 101 |
+
# def test_bar_cpu(self):
|
| 102 |
+
# test_bar(self, 'cpu')
|
| 103 |
+
#
|
| 104 |
+
# # An instantiated test class for the CUDA device type
|
| 105 |
+
# class TestClassFooCUDA(CUDATestBase):
|
| 106 |
+
#
|
| 107 |
+
# # An instantiated test that calls the template with the string representation
|
| 108 |
+
# # of a device from the test class's device type
|
| 109 |
+
# def test_bar_cuda(self):
|
| 110 |
+
# test_bar(self, 'cuda:0')
|
| 111 |
+
# --------------------------------------------------------
|
| 112 |
+
#
|
| 113 |
+
# These instantiated test classes ARE discoverable and runnable by both
|
| 114 |
+
# unittest and pytest. One thing that may be confusing, however, is that
|
| 115 |
+
# attempting to run "test_bar" will not work, despite it appearing in the
|
| 116 |
+
# original template code. This is because "test_bar" is no longer discoverable
|
| 117 |
+
# after instantiate_device_type_tests() runs, as the above snippet shows.
|
| 118 |
+
# Instead "test_bar_cpu" and "test_bar_cuda" may be run directly, or both
|
| 119 |
+
# can be run with the option "-k test_bar".
|
| 120 |
+
#
|
| 121 |
+
# Removing the template class and adding the instantiated classes requires
|
| 122 |
+
# passing "globals()" to instantiate_device_type_tests(), because it
|
| 123 |
+
# edits the file's Python objects.
|
| 124 |
+
#
|
| 125 |
+
# As mentioned, tests can be additionally parametrized on dtypes or
|
| 126 |
+
# operators. Datatype parametrization uses the @dtypes decorator and
|
| 127 |
+
# requires a test template like this:
|
| 128 |
+
#
|
| 129 |
+
# --------------------------------------------------------
|
| 130 |
+
# # A template test that can be specialized with a device and a datatype (dtype)
|
| 131 |
+
# @dtypes(torch.float32, torch.int64)
|
| 132 |
+
# def test_car(self, device, dtype)
|
| 133 |
+
# pass
|
| 134 |
+
# --------------------------------------------------------
|
| 135 |
+
#
|
| 136 |
+
# If the CPU and CUDA device types are available this test would be
|
| 137 |
+
# instantiated as 4 tests that cover the cross-product of the two dtypes
|
| 138 |
+
# and two device types:
|
| 139 |
+
#
|
| 140 |
+
# - test_car_cpu_float32
|
| 141 |
+
# - test_car_cpu_int64
|
| 142 |
+
# - test_car_cuda_float32
|
| 143 |
+
# - test_car_cuda_int64
|
| 144 |
+
#
|
| 145 |
+
# The dtype is passed as a torch.dtype object.
|
| 146 |
+
#
|
| 147 |
+
# Tests parametrized on operators (actually on OpInfos, more on that in a
|
| 148 |
+
# moment...) use the @ops decorator and require a test template like this:
|
| 149 |
+
# --------------------------------------------------------
|
| 150 |
+
# # A template test that can be specialized with a device, dtype, and OpInfo
|
| 151 |
+
# @ops(op_db)
|
| 152 |
+
# def test_car(self, device, dtype, op)
|
| 153 |
+
# pass
|
| 154 |
+
# --------------------------------------------------------
|
| 155 |
+
#
|
| 156 |
+
# See the documentation for the @ops decorator below for additional details
|
| 157 |
+
# on how to use it and see the note [OpInfos] in
|
| 158 |
+
# common_methods_invocations.py for more details on OpInfos.
|
| 159 |
+
#
|
| 160 |
+
# A test parametrized over the entire "op_db", which contains hundreds of
|
| 161 |
+
# OpInfos, will likely have hundreds or thousands of instantiations. The
|
| 162 |
+
# test will be instantiated on the cross-product of device types, operators,
|
| 163 |
+
# and the dtypes the operator supports on that device type. The instantiated
|
| 164 |
+
# tests will have names like:
|
| 165 |
+
#
|
| 166 |
+
# - test_car_add_cpu_float32
|
| 167 |
+
# - test_car_sub_cuda_int64
|
| 168 |
+
#
|
| 169 |
+
# The first instantiated test calls the original test_car() with the OpInfo
|
| 170 |
+
# for torch.add as its "op" argument, the string 'cpu' for its "device" argument,
|
| 171 |
+
# and the dtype torch.float32 for its "dtype" argument. The second instantiated
|
| 172 |
+
# test calls the test_car() with the OpInfo for torch.sub, a CUDA device string
|
| 173 |
+
# like 'cuda:0' or 'cuda:1' for its "device" argument, and the dtype
|
| 174 |
+
# torch.int64 for its "dtype argument."
|
| 175 |
+
#
|
| 176 |
+
# In addition to parametrizing over device, dtype, and ops via OpInfos, the
|
| 177 |
+
# @parametrize decorator is supported for arbitrary parametrizations:
|
| 178 |
+
# --------------------------------------------------------
|
| 179 |
+
# # A template test that can be specialized with a device, dtype, and value for x
|
| 180 |
+
# @parametrize("x", range(5))
|
| 181 |
+
# def test_car(self, device, dtype, x)
|
| 182 |
+
# pass
|
| 183 |
+
# --------------------------------------------------------
|
| 184 |
+
#
|
| 185 |
+
# See the documentation for @parametrize in common_utils.py for additional details
|
| 186 |
+
# on this. Note that the instantiate_device_type_tests() function will handle
|
| 187 |
+
# such parametrizations; there is no need to additionally call
|
| 188 |
+
# instantiate_parametrized_tests().
|
| 189 |
+
#
|
| 190 |
+
# Clever test filtering can be very useful when working with parametrized
|
| 191 |
+
# tests. "-k test_car" would run every instantiated variant of the test_car()
|
| 192 |
+
# test template, and "-k test_car_add" runs every variant instantiated with
|
| 193 |
+
# torch.add.
|
| 194 |
+
#
|
| 195 |
+
# It is important to use the passed device and dtype as appropriate. Use
|
| 196 |
+
# helper functions like make_tensor() that require explicitly specifying
|
| 197 |
+
# the device and dtype so they're not forgotten.
|
| 198 |
+
#
|
| 199 |
+
# Test templates can use a variety of composable decorators to specify
|
| 200 |
+
# additional options and requirements, some are listed here:
|
| 201 |
+
#
|
| 202 |
+
# - @deviceCountAtLeast(<minimum number of devices to run test with>)
|
| 203 |
+
# Passes a list of strings representing all available devices of
|
| 204 |
+
# the test class's device type as the test template's "device" argument.
|
| 205 |
+
# If there are fewer devices than the value passed to the decorator
|
| 206 |
+
# the test is skipped.
|
| 207 |
+
# - @dtypes(<list of tuples of dtypes>)
|
| 208 |
+
# In addition to accepting multiple dtypes, the @dtypes decorator
|
| 209 |
+
# can accept a sequence of tuple pairs of dtypes. The test template
|
| 210 |
+
# will be called with each tuple for its "dtype" argument.
|
| 211 |
+
# - @onlyNativeDeviceTypes
|
| 212 |
+
# Skips the test if the device is not a native device type (currently CPU, CUDA, Meta)
|
| 213 |
+
# - @onlyCPU
|
| 214 |
+
# Skips the test if the device is not a CPU device
|
| 215 |
+
# - @onlyCUDA
|
| 216 |
+
# Skips the test if the device is not a CUDA device
|
| 217 |
+
# - @onlyMPS
|
| 218 |
+
# Skips the test if the device is not a MPS device
|
| 219 |
+
# - @skipCPUIfNoLapack
|
| 220 |
+
# Skips the test if the device is a CPU device and LAPACK is not installed
|
| 221 |
+
# - @skipCPUIfNoMkl
|
| 222 |
+
# Skips the test if the device is a CPU device and MKL is not installed
|
| 223 |
+
# - @skipCUDAIfNoMagma
|
| 224 |
+
# Skips the test if the device is a CUDA device and MAGMA is not installed
|
| 225 |
+
# - @skipCUDAIfRocm
|
| 226 |
+
# Skips the test if the device is a CUDA device and ROCm is being used
|
| 227 |
+
|
| 228 |
+
|
| 229 |
+
# Note [Adding a Device Type]
|
| 230 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
| 231 |
+
#
|
| 232 |
+
# To add a device type:
|
| 233 |
+
#
|
| 234 |
+
# (1) Create a new "TestBase" extending DeviceTypeTestBase.
|
| 235 |
+
# See CPUTestBase and CUDATestBase below.
|
| 236 |
+
# (2) Define the "device_type" attribute of the base to be the
|
| 237 |
+
# appropriate string.
|
| 238 |
+
# (3) Add logic to this file that appends your base class to
|
| 239 |
+
# device_type_test_bases when your device type is available.
|
| 240 |
+
# (4) (Optional) Write setUpClass/tearDownClass class methods that
|
| 241 |
+
# instantiate dependencies (see MAGMA in CUDATestBase).
|
| 242 |
+
# (5) (Optional) Override the "instantiate_test" method for total
|
| 243 |
+
# control over how your class creates tests.
|
| 244 |
+
#
|
| 245 |
+
# setUpClass is called AFTER tests have been created and BEFORE and ONLY IF
|
| 246 |
+
# they are run. This makes it useful for initializing devices and dependencies.
|
| 247 |
+
|
| 248 |
+
|
| 249 |
+
# Note [Overriding methods in generic tests]
|
| 250 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
| 251 |
+
#
|
| 252 |
+
# Device generic tests look a lot like normal test classes, but they differ
|
| 253 |
+
# from ordinary classes in some important ways. In particular, overriding
|
| 254 |
+
# methods in generic tests doesn't work quite the way you expect.
|
| 255 |
+
#
|
| 256 |
+
# class TestFooDeviceType(TestCase):
|
| 257 |
+
# # Intention is to override
|
| 258 |
+
# def assertEqual(self, x, y):
|
| 259 |
+
# # This DOESN'T WORK!
|
| 260 |
+
# super().assertEqual(x, y)
|
| 261 |
+
#
|
| 262 |
+
# If you try to run this code, you'll get an error saying that TestFooDeviceType
|
| 263 |
+
# is not in scope. This is because after instantiating our classes, we delete
|
| 264 |
+
# it from the parent scope. Instead, you need to hardcode a direct invocation
|
| 265 |
+
# of the desired subclass call, e.g.,
|
| 266 |
+
#
|
| 267 |
+
# class TestFooDeviceType(TestCase):
|
| 268 |
+
# # Intention is to override
|
| 269 |
+
# def assertEqual(self, x, y):
|
| 270 |
+
# TestCase.assertEqual(x, y)
|
| 271 |
+
#
|
| 272 |
+
# However, a less error-prone way of customizing the behavior of TestCase
|
| 273 |
+
# is to either (1) add your functionality to TestCase and make it toggled
|
| 274 |
+
# by a class attribute, or (2) create your own subclass of TestCase, and
|
| 275 |
+
# then inherit from it for your generic test.
|
| 276 |
+
|
| 277 |
+
|
| 278 |
+
def _dtype_test_suffix(dtypes):
|
| 279 |
+
""" Returns the test suffix for a dtype, sequence of dtypes, or None. """
|
| 280 |
+
if isinstance(dtypes, (list, tuple)):
|
| 281 |
+
if len(dtypes) == 0:
|
| 282 |
+
return ''
|
| 283 |
+
return '_' + '_'.join(dtype_name(d) for d in dtypes)
|
| 284 |
+
elif dtypes:
|
| 285 |
+
return f'_{dtype_name(dtypes)}'
|
| 286 |
+
else:
|
| 287 |
+
return ''
|
| 288 |
+
|
| 289 |
+
|
| 290 |
+
def _update_param_kwargs(param_kwargs, name, value):
|
| 291 |
+
""" Adds a kwarg with the specified name and value to the param_kwargs dict. """
|
| 292 |
+
# Make name plural (e.g. devices / dtypes) if the value is composite.
|
| 293 |
+
plural_name = f'{name}s'
|
| 294 |
+
|
| 295 |
+
# Clear out old entries of the arg if any.
|
| 296 |
+
if name in param_kwargs:
|
| 297 |
+
del param_kwargs[name]
|
| 298 |
+
if plural_name in param_kwargs:
|
| 299 |
+
del param_kwargs[plural_name]
|
| 300 |
+
|
| 301 |
+
if isinstance(value, (list, tuple)):
|
| 302 |
+
param_kwargs[plural_name] = value
|
| 303 |
+
elif value is not None:
|
| 304 |
+
param_kwargs[name] = value
|
| 305 |
+
|
| 306 |
+
# Leave param_kwargs as-is when value is None.
|
| 307 |
+
|
| 308 |
+
|
| 309 |
+
class DeviceTypeTestBase(TestCase):
    """Base class for device-specific test classes.

    Subclasses (CPUTestBase, CUDATestBase, ...) set ``device_type`` and may
    override the device-discovery hooks. ``instantiate_test`` turns a generic
    test template into concrete per-device / per-dtype test methods on the
    subclass.
    """
    device_type: str = 'generic_device_type'

    # Flag to disable test suite early due to unrecoverable error such as CUDA error.
    _stop_test_suite = False

    # Precision is a thread-local setting since it may be overridden per test
    _tls = threading.local()
    _tls.precision = TestCase._precision
    _tls.rel_tol = TestCase._rel_tol

    @property
    def precision(self):
        # Absolute tolerance used by assertEqual-style comparisons.
        return self._tls.precision

    @precision.setter
    def precision(self, prec):
        self._tls.precision = prec

    @property
    def rel_tol(self):
        # Relative tolerance used by assertEqual-style comparisons.
        return self._tls.rel_tol

    @rel_tol.setter
    def rel_tol(self, prec):
        self._tls.rel_tol = prec

    # Returns a string representing the device that single device tests should use.
    # Note: single device tests use this device exclusively.
    @classmethod
    def get_primary_device(cls):
        return cls.device_type

    @classmethod
    def _init_and_get_primary_device(cls):
        # Like get_primary_device(), but tolerant of subclasses whose primary
        # device is only known after class-level setup has run.
        try:
            return cls.get_primary_device()
        except Exception:
            # For CUDATestBase, XLATestBase, and possibly others, the primary device won't be available
            # until setUpClass() sets it. Call that manually here if needed.
            if hasattr(cls, 'setUpClass'):
                cls.setUpClass()
            return cls.get_primary_device()

    # Returns a list of strings representing all available devices of this
    # device type. The primary device must be the first string in the list
    # and the list must contain no duplicates.
    # Note: UNSTABLE API. Will be replaced once PyTorch has a device generic
    # mechanism of acquiring all available devices.
    @classmethod
    def get_all_devices(cls):
        return [cls.get_primary_device()]

    # Returns the dtypes the test has requested.
    # Prefers device-specific dtype specifications over generic ones.
    @classmethod
    def _get_dtypes(cls, test):
        # ``test.dtypes`` is a dict set by the @dtypes / @dtypesIf decorators,
        # keyed by device type with 'all' as the generic fallback.
        if not hasattr(test, 'dtypes'):
            return None

        default_dtypes = test.dtypes.get('all')
        msg = f"@dtypes is mandatory when using @dtypesIf however '{test.__name__}' didn't specify it"
        assert default_dtypes is not None, msg

        return test.dtypes.get(cls.device_type, default_dtypes)

    def _get_precision_override(self, test, dtype):
        # Per-dtype absolute tolerance set by the @precisionOverride decorator,
        # falling back to the current thread-local precision.
        if not hasattr(test, 'precision_overrides'):
            return self.precision
        return test.precision_overrides.get(dtype, self.precision)

    def _get_tolerance_override(self, test, dtype):
        # Per-dtype (atol, rtol) pair set by the @toleranceOverride decorator.
        if not hasattr(test, 'tolerance_overrides'):
            return self.precision, self.rel_tol
        return test.tolerance_overrides.get(dtype, tol(self.precision, self.rel_tol))

    def _apply_precision_override_for_test(self, test, param_kwargs):
        # Applies any precision/tolerance overrides for the dtype(s) this
        # instantiated test runs with. Tolerance overrides win because they
        # are applied second.
        dtype = param_kwargs['dtype'] if 'dtype' in param_kwargs else None
        dtype = param_kwargs['dtypes'] if 'dtypes' in param_kwargs else dtype
        if dtype:
            self.precision = self._get_precision_override(test, dtype)
            self.precision, self.rel_tol = self._get_tolerance_override(test, dtype)

    # Creates device-specific tests.
    @classmethod
    def instantiate_test(cls, name, test, *, generic_cls=None):

        def instantiate_test_helper(cls, name, *, test, param_kwargs=None, decorator_fn=lambda _: []):
            # Add the device param kwarg if the test needs device or devices.
            param_kwargs = {} if param_kwargs is None else param_kwargs
            test_sig_params = inspect.signature(test).parameters
            if 'device' in test_sig_params or 'devices' in test_sig_params:
                device_arg: str = cls._init_and_get_primary_device()
                if hasattr(test, 'num_required_devices'):
                    device_arg = cls.get_all_devices()
                _update_param_kwargs(param_kwargs, 'device', device_arg)

            # Apply decorators based on param kwargs.
            for decorator in decorator_fn(param_kwargs):
                test = decorator(test)

            # Constructs the test
            @wraps(test)
            def instantiated_test(self, param_kwargs=param_kwargs):
                # Sets precision and runs test
                # Note: precision is reset after the test is run
                guard_precision = self.precision
                guard_rel_tol = self.rel_tol
                try:
                    self._apply_precision_override_for_test(test, param_kwargs)
                    result = test(self, **param_kwargs)
                except RuntimeError as rte:
                    # check if rte should stop entire test suite.
                    self._stop_test_suite = self._should_stop_test_suite()
                    # Check if test has been decorated with `@expectedFailure`
                    # Using `__unittest_expecting_failure__` attribute, see
                    # https://github.com/python/cpython/blob/ffa505b580464/Lib/unittest/case.py#L164
                    # In that case, make it fail with "unexpected success" by suppressing exception
                    if getattr(test, "__unittest_expecting_failure__", False) and self._stop_test_suite:
                        import sys
                        print("Suppressing fatal exception to trigger unexpected success", file=sys.stderr)
                        return
                    # raise the runtime error as is for the test suite to record.
                    raise rte
                finally:
                    self.precision = guard_precision
                    self.rel_tol = guard_rel_tol

                return result

            assert not hasattr(cls, name), f"Redefinition of test {name}"
            setattr(cls, name, instantiated_test)

        def default_parametrize_fn(test, generic_cls, device_cls):
            # By default, no parametrization is needed.
            yield (test, '', {}, lambda _: [])

        # Parametrization decorators set the parametrize_fn attribute on the test.
        parametrize_fn = getattr(test, "parametrize_fn", default_parametrize_fn)

        # If one of the @dtypes* decorators is present, also parametrize over the dtypes set by it.
        dtypes = cls._get_dtypes(test)
        if dtypes is not None:

            def dtype_parametrize_fn(test, generic_cls, device_cls, dtypes=dtypes):
                for dtype in dtypes:
                    param_kwargs: Dict[str, Any] = {}
                    _update_param_kwargs(param_kwargs, "dtype", dtype)

                    # Note that an empty test suffix is set here so that the dtype can be appended
                    # later after the device.
                    yield (test, '', param_kwargs, lambda _: [])

            parametrize_fn = compose_parametrize_fns(dtype_parametrize_fn, parametrize_fn)

        # Instantiate the parametrized tests.
        for (test, test_suffix, param_kwargs, decorator_fn) in parametrize_fn(test, generic_cls, cls):  # noqa: B020
            test_suffix = '' if test_suffix == '' else '_' + test_suffix
            device_suffix = '_' + cls.device_type

            # Note: device and dtype suffix placement
            # Special handling here to place dtype(s) after device according to test name convention.
            dtype_kwarg = None
            if 'dtype' in param_kwargs or 'dtypes' in param_kwargs:
                dtype_kwarg = param_kwargs['dtypes'] if 'dtypes' in param_kwargs else param_kwargs['dtype']
            test_name = f'{name}{test_suffix}{device_suffix}{_dtype_test_suffix(dtype_kwarg)}'

            instantiate_test_helper(cls=cls, name=test_name, test=test, param_kwargs=param_kwargs,
                                    decorator_fn=decorator_fn)

    def run(self, result=None):
        super().run(result=result)
        # Early terminate test if _stop_test_suite is set.
        if self._stop_test_suite:
            result.stop()
|
| 484 |
+
|
| 485 |
+
|
| 486 |
+
class CPUTestBase(DeviceTypeTestBase):
    device_type = 'cpu'

    # CPU errors are recoverable, so they never abort the whole test suite.
    def _should_stop_test_suite(self):
        return False
|
| 492 |
+
|
| 493 |
+
class CUDATestBase(DeviceTypeTestBase):
    device_type = 'cuda'
    _do_cuda_memory_leak_check = True
    _do_cuda_non_default_stream = True
    primary_device: ClassVar[str]
    cudnn_version: ClassVar[Any]
    no_magma: ClassVar[bool]
    no_cudnn: ClassVar[bool]

    def has_cudnn(self):
        # no_cudnn is probed once in setUpClass().
        return not self.no_cudnn

    @classmethod
    def get_primary_device(cls):
        return cls.primary_device

    @classmethod
    def get_all_devices(cls):
        # The primary device is listed first; every other CUDA device follows
        # in index order, with no duplicates.
        primary_idx = int(cls.get_primary_device().split(':')[1])
        device_count = torch.cuda.device_count()
        secondary = [f'cuda:{idx}' for idx in range(device_count) if idx != primary_idx]
        return [cls.get_primary_device()] + secondary

    @classmethod
    def setUpClass(cls):
        # has_magma shows up after cuda is initialized
        probe = torch.ones(1).cuda()
        cls.no_magma = not torch.cuda.has_magma

        # Determines if cuDNN is available and its version
        cls.no_cudnn = not torch.backends.cudnn.is_acceptable(probe)
        cls.cudnn_version = None if cls.no_cudnn else torch.backends.cudnn.version()

        # Acquires the current device as the primary (test) device
        cls.primary_device = f'cuda:{torch.cuda.current_device()}'
|
| 531 |
+
|
| 532 |
+
# See Note [Lazy Tensor tests in device agnostic testing]
lazy_ts_backend_init = False

class LazyTestBase(DeviceTypeTestBase):
    device_type = 'lazy'

    # Lazy-tensor failures never abort the remainder of the suite.
    def _should_stop_test_suite(self):
        return False

    @classmethod
    def setUpClass(cls):
        import torch._lazy
        import torch._lazy.metrics
        import torch._lazy.ts_backend
        global lazy_ts_backend_init
        if lazy_ts_backend_init:
            return
        # Need to connect the TS backend to lazy key before running tests;
        # do it at most once per process.
        torch._lazy.ts_backend.init()
        lazy_ts_backend_init = True
|
| 550 |
+
|
| 551 |
+
class MPSTestBase(DeviceTypeTestBase):
    device_type = 'mps'
    primary_device: ClassVar[str]

    @classmethod
    def get_primary_device(cls):
        return cls.primary_device

    @classmethod
    def get_all_devices(cls):
        # currently only one device is supported on MPS backend
        return [cls.get_primary_device()]

    @classmethod
    def setUpClass(cls):
        cls.primary_device = 'mps:0'

    # MPS errors never abort the remainder of the suite.
    def _should_stop_test_suite(self):
        return False
|
| 571 |
+
|
| 572 |
+
class XPUTestBase(DeviceTypeTestBase):
    """Test base for Intel's XPU backend (currently single-device)."""
    device_type = 'xpu'
    primary_device: ClassVar[str]

    @classmethod
    def get_primary_device(cls):
        return cls.primary_device

    @classmethod
    def get_all_devices(cls):
        # Only a single device is currently supported on the XPU backend.
        return [cls.get_primary_device()]

    @classmethod
    def setUpClass(cls):
        cls.primary_device = 'xpu:0'

    def _should_stop_test_suite(self):
        # An XPU failure never aborts the whole suite.
        return False
|
| 593 |
+
class PrivateUse1TestBase(DeviceTypeTestBase):
    """Test base for an out-of-tree backend registered via the privateuse1 key."""
    primary_device: ClassVar[str]
    device_mod = None
    device_type = 'privateuse1'

    @classmethod
    def get_primary_device(cls):
        return cls.primary_device

    @classmethod
    def get_all_devices(cls):
        # Primary device first, then every other device the backend reports.
        primary = cls.get_primary_device()
        primary_idx = int(primary.split(':')[1])
        count = cls.device_mod.device_count()
        others = [f'{cls.device_type}:{idx}' for idx in range(count) if idx != primary_idx]
        return [primary] + others

    @classmethod
    def setUpClass(cls):
        # Resolve the registered backend name and its torch device module.
        cls.device_type = torch._C._get_privateuse1_backend_name()
        cls.device_mod = getattr(torch, cls.device_type, None)
        assert cls.device_mod is not None, f'''torch has no module of `{cls.device_type}`, you should register
a module by `torch._register_device_module`.'''
        cls.primary_device = f'{cls.device_type}:{cls.device_mod.current_device()}'
|
| 619 |
+
# Adds available device-type-specific test base classes
|
| 620 |
+
def get_device_type_test_bases():
    """Return the device-type test base classes usable in this environment."""
    # set type to List[Any] due to mypy list-of-union issue:
    # https://github.com/python/mypy/issues/3351
    test_bases: List[Any] = []

    if IS_SANDCASTLE or IS_FBCODE:
        if IS_REMOTE_GPU:
            # Skip if a sanitizer is enabled
            if not (TEST_WITH_ASAN or TEST_WITH_TSAN or TEST_WITH_UBSAN):
                test_bases.append(CUDATestBase)
        else:
            test_bases.append(CPUTestBase)
    else:
        test_bases.append(CPUTestBase)
        if torch.cuda.is_available():
            test_bases.append(CUDATestBase)

        device_type = torch._C._get_privateuse1_backend_name()
        device_mod = getattr(torch, device_type, None)
        if hasattr(device_mod, "is_available") and device_mod.is_available():
            test_bases.append(PrivateUse1TestBase)
    # Disable MPS testing in generic device testing temporarily while we're
    # ramping up support.
    # elif torch.backends.mps.is_available():
    #     test_bases.append(MPSTestBase)

    return test_bases
|
| 648 |
+
# Default device-type test bases, computed once at import time.
device_type_test_bases = get_device_type_test_bases()
|
| 650 |
+
|
| 651 |
+
def filter_desired_device_types(device_type_test_bases, except_for=None, only_for=None):
    """Filter test base classes by their `device_type`.

    `except_for` removes the listed device types and `only_for` keeps only the
    listed ones; a device type may not appear in both.
    """
    # device type cannot appear in both except_for and only_for
    intersect = set(except_for or []) & set(only_for or [])
    assert not intersect, f"device ({intersect}) appeared in both except_for and only_for"

    bases = list(device_type_test_bases)
    if except_for:
        bases = [base for base in bases if base.device_type not in except_for]
    if only_for:
        bases = [base for base in bases if base.device_type in only_for]

    return bases
|
| 665 |
+
|
| 666 |
+
# Note [How to extend DeviceTypeTestBase to add new test device]
|
| 667 |
+
# The following logic optionally allows downstream projects like pytorch/xla to
|
| 668 |
+
# add more test devices.
|
| 669 |
+
# Instructions:
|
| 670 |
+
# - Add a python file (e.g. pytorch/xla/test/pytorch_test_base.py) in downstream project.
|
| 671 |
+
# - Inside the file, one should inherit from `DeviceTypeTestBase` class and define
|
| 672 |
+
# a new DeviceTypeTest class (e.g. `XLATestBase`) with proper implementation of
|
| 673 |
+
# `instantiate_test` method.
|
| 674 |
+
# - DO NOT import common_device_type inside the file.
|
| 675 |
+
# `runpy.run_path` with `globals()` already properly setup the context so that
|
| 676 |
+
# `DeviceTypeTestBase` is already available.
|
| 677 |
+
# - Set a top-level variable `TEST_CLASS` equal to your new class.
|
| 678 |
+
# E.g. TEST_CLASS = XLATensorBase
|
| 679 |
+
# - To run tests with new device type, set `TORCH_TEST_DEVICE` env variable to path
|
| 680 |
+
# to this file. Multiple paths can be separated by `:`.
|
| 681 |
+
# See pytorch/xla/test/pytorch_test_base.py for a more detailed example.
|
| 682 |
+
# Optionally load extra device test bases from downstream projects; see
# Note [How to extend DeviceTypeTestBase to add new test device] above.
_TORCH_TEST_DEVICES = os.environ.get('TORCH_TEST_DEVICES', None)
if _TORCH_TEST_DEVICES:
    # Each ':'-separated entry is a path to a module that defines TEST_CLASS.
    for path in _TORCH_TEST_DEVICES.split(':'):
        # runpy (a stdlib module) lacks annotations
        mod = runpy.run_path(path, init_globals=globals())  # type: ignore[func-returns-value]
        device_type_test_bases.append(mod['TEST_CLASS'])
|
| 689 |
+
|
| 690 |
+
# Flag read from the PYTORCH_CUDA_MEMCHECK environment variable ('1' enables it).
PYTORCH_CUDA_MEMCHECK = os.getenv('PYTORCH_CUDA_MEMCHECK', '0') == '1'

# Names of the environment variables used to restrict which device types
# get instantiated (see get_desired_device_type_test_bases below).
PYTORCH_TESTING_DEVICE_ONLY_FOR_KEY = 'PYTORCH_TESTING_DEVICE_ONLY_FOR'
PYTORCH_TESTING_DEVICE_EXCEPT_FOR_KEY = 'PYTORCH_TESTING_DEVICE_EXCEPT_FOR'
PYTORCH_TESTING_DEVICE_FOR_CUSTOM_KEY = 'PYTORCH_TESTING_DEVICE_FOR_CUSTOM'
|
| 696 |
+
|
| 697 |
+
def get_desired_device_type_test_bases(except_for=None, only_for=None, include_lazy=False, allow_mps=False):
    """Return the device test bases after applying caller and environment filters.

    `allow_mps` lets callers opt tests into MPS (similar to `include_lazy`);
    `include_lazy` opts the calling test file into lazy-tensor testing.
    """
    candidates = list(device_type_test_bases)
    if allow_mps and TEST_MPS and MPSTestBase not in candidates:
        candidates.append(MPSTestBase)
    if only_for == 'xpu' and TEST_XPU and XPUTestBase not in candidates:
        candidates.append(XPUTestBase)
    # Apply the caller-supplied filters first.
    desired = filter_desired_device_types(candidates, except_for, only_for)
    if include_lazy:
        # Note [Lazy Tensor tests in device agnostic testing]
        # Right now, test_view_ops.py runs with LazyTensor.
        # We don't want to opt every device-agnostic test into using the lazy
        # device, because many of them will fail; the only way to opt a
        # specific device-agnostic test file in is include_lazy=True.
        if IS_FBCODE:
            print("TorchScript backend not yet supported in FBCODE/OVRSOURCE builds", file=sys.stderr)
        else:
            desired.append(LazyTestBase)

    def _split_csv(value: str):
        return value.split(",") if value else []

    # Run some cuda testcases on other devices if available, e.g.:
    #   export PYTORCH_TESTING_DEVICE_FOR_CUSTOM=privateuse1
    custom_types = _split_csv(os.getenv(PYTORCH_TESTING_DEVICE_FOR_CUSTOM_KEY, ''))
    if custom_types:
        desired += [base for base in candidates if base.device_type in custom_types]
        desired = list(set(desired))

    # Environment-driven filters, e.g.:
    #   export PYTORCH_TESTING_DEVICE_ONLY_FOR=cuda,cpu
    #   export PYTORCH_TESTING_DEVICE_EXCEPT_FOR=xla
    env_only = _split_csv(os.getenv(PYTORCH_TESTING_DEVICE_ONLY_FOR_KEY, ''))
    env_except = _split_csv(os.getenv(PYTORCH_TESTING_DEVICE_EXCEPT_FOR_KEY, ''))

    return filter_desired_device_types(desired, env_except, env_only)
|
| 738 |
+
|
| 739 |
+
|
| 740 |
+
# Adds 'instantiated' device-specific test cases to the given scope.
|
| 741 |
+
# The tests in these test cases are derived from the generic tests in
|
| 742 |
+
# generic_test_class. This function should be used instead of
|
| 743 |
+
# instantiate_parametrized_tests() if the test class contains
|
| 744 |
+
# device-specific tests (NB: this supports additional @parametrize usage).
|
| 745 |
+
#
|
| 746 |
+
# See note "Writing Test Templates"
|
| 747 |
+
def instantiate_device_type_tests(generic_test_class, scope, except_for=None, only_for=None, include_lazy=False, allow_mps=False):
    """Adds 'instantiated' device-specific test cases to the given scope.

    The tests in these test cases are derived from the generic tests in
    `generic_test_class`. Use this instead of instantiate_parametrized_tests()
    when the test class contains device-specific tests (NB: additional
    @parametrize usage is supported). See note "Writing Test Templates".
    """
    # Removes the generic test class from its enclosing scope so its tests
    # are not discoverable.
    del scope[generic_test_class.__name__]

    # Creates an 'empty' version of the generic_test_class
    # Note: we don't inherit from the generic_test_class directly because
    # that would add its tests to our test classes and they would be
    # discovered (despite not being runnable). Inherited methods also
    # can't be removed later, and we can't rely on load_tests because
    # pytest doesn't support it (as of this writing).
    empty_name = generic_test_class.__name__ + "_base"
    empty_class = type(empty_name, generic_test_class.__bases__, {})

    # Acquires members names
    # See Note [Overriding methods in generic tests]
    generic_members = set(generic_test_class.__dict__.keys()) - set(empty_class.__dict__.keys())
    generic_tests = [x for x in generic_members if x.startswith('test')]

    # The dynamically-created test classes derive from the test template class
    # and the empty class. Arrange for both setUpClass and tearDownClass methods
    # to be called, so the parameterized test classes support setup and teardown.
    # BUGFIX: these hooks are built via a factory so each generated class
    # captures ITS OWN base; a plain nested function closing over the loop
    # variable `base` would late-bind, making every class's hooks call the
    # last base's setUpClass/tearDownClass.
    def _make_class_hooks(base):
        @classmethod
        def _setUpClass(cls):
            base.setUpClass()
            empty_class.setUpClass()

        @classmethod
        def _tearDownClass(cls):
            empty_class.tearDownClass()
            base.tearDownClass()

        return _setUpClass, _tearDownClass

    # Creates device-specific test cases
    for base in get_desired_device_type_test_bases(except_for, only_for, include_lazy, allow_mps):
        class_name = generic_test_class.__name__ + base.device_type.upper()

        # type set to Any and suppressed due to unsupport runtime class:
        # https://github.com/python/mypy/wiki/Unsupported-Python-Features
        device_type_test_class: Any = type(class_name, (base, empty_class), {})

        for name in generic_members:
            if name in generic_tests:  # Instantiates test member
                test = getattr(generic_test_class, name)
                # XLA-compat shim (XLA's instantiate_test takes doesn't take generic_cls)
                sig = inspect.signature(device_type_test_class.instantiate_test)
                if len(sig.parameters) == 3:
                    # Instantiates the device-specific tests
                    device_type_test_class.instantiate_test(name, copy.deepcopy(test), generic_cls=generic_test_class)
                else:
                    device_type_test_class.instantiate_test(name, copy.deepcopy(test))
            else:  # Ports non-test member
                assert name not in device_type_test_class.__dict__, f"Redefinition of directly defined member {name}"
                nontest = getattr(generic_test_class, name)
                setattr(device_type_test_class, name, nontest)

        device_type_test_class.setUpClass, device_type_test_class.tearDownClass = _make_class_hooks(base)

        # Mimics defining the instantiated class in the caller's file
        # by setting its module to the given class's and adding
        # the module to the given scope.
        # This lets the instantiated class be discovered by unittest.
        device_type_test_class.__module__ = generic_test_class.__module__
        scope[class_name] = device_type_test_class
|
| 813 |
+
|
| 814 |
+
# Category of dtypes to run an OpInfo-based test for
|
| 815 |
+
# Example use: @ops(dtype=OpDTypes.supported)
|
| 816 |
+
#
|
| 817 |
+
# There are 5 categories:
|
| 818 |
+
# - supported: Every dtype supported by the operator. Use for exhaustive
|
| 819 |
+
# testing of all dtypes.
|
| 820 |
+
# - unsupported: Run tests on dtypes not supported by the operator. e.g. for
|
| 821 |
+
# testing the operator raises an error and doesn't crash.
|
| 822 |
+
# - supported_backward: Every dtype supported by the operator's backward pass.
|
| 823 |
+
# - unsupported_backward: Run tests on dtypes not supported by the operator's backward pass.
|
| 824 |
+
# - any_one: Runs a test for one dtype the operator supports. Prioritizes dtypes the
|
| 825 |
+
# operator supports in both forward and backward.
|
| 826 |
+
# - none: Useful for tests that are not dtype-specific. No dtype will be passed to the test
|
| 827 |
+
# when this is selected.
|
| 828 |
+
class OpDTypes(Enum):
    """Category of dtypes to run an OpInfo-based test for (see @ops)."""
    supported = 0  # Test all supported dtypes (default)
    unsupported = 1  # Test only unsupported dtypes
    supported_backward = 2  # Test all supported backward dtypes
    unsupported_backward = 3  # Test only unsupported backward dtypes
    any_one = 4  # Test precisely one supported dtype
    none = 5  # Instantiate no dtype variants (no dtype kwarg needed)
    any_common_cpu_cuda_one = 6  # Test precisely one supported dtype that is common to both cuda and cpu
+
|
| 837 |
+
|
| 838 |
+
# Arbitrary order in which OpDTypes.any_one / any_common_cpu_cuda_one pick
# a dtype: earlier entries are preferred (see ops._parametrize_test).
ANY_DTYPE_ORDER = (
    torch.float32,
    torch.float64,
    torch.complex64,
    torch.complex128,
    torch.float16,
    torch.bfloat16,
    torch.long,
    torch.int32,
    torch.int16,
    torch.int8,
    torch.uint8,
    torch.bool
)
+
|
| 854 |
+
def _serialize_sample(sample_input):
|
| 855 |
+
# NB: For OpInfos, SampleInput.summary() prints in a cleaner way.
|
| 856 |
+
if getattr(sample_input, "summary", None) is not None:
|
| 857 |
+
return sample_input.summary()
|
| 858 |
+
return str(sample_input)
|
| 859 |
+
|
| 860 |
+
# Decorator that defines the OpInfos a test template should be instantiated for.
|
| 861 |
+
#
|
| 862 |
+
# Example usage:
|
| 863 |
+
#
|
| 864 |
+
# @ops(unary_ufuncs)
|
| 865 |
+
# def test_numerics(self, device, dtype, op):
|
| 866 |
+
# <test_code>
|
| 867 |
+
#
|
| 868 |
+
# This will instantiate variants of test_numerics for each given OpInfo,
|
| 869 |
+
# on each device the OpInfo's operator supports, and for every dtype supported by
|
| 870 |
+
# that operator. There are a few caveats to the dtype rule, explained below.
|
| 871 |
+
#
|
| 872 |
+
# The @ops decorator can accept two
|
| 873 |
+
# additional arguments, "dtypes" and "allowed_dtypes". If "dtypes" is specified
|
| 874 |
+
# then the test variants are instantiated for those dtypes, regardless of
|
| 875 |
+
# what the operator supports. If given "allowed_dtypes" then test variants
|
| 876 |
+
# are instantiated only for the intersection of allowed_dtypes and the dtypes
|
| 877 |
+
# they would otherwise be instantiated with. That is, allowed_dtypes composes
|
| 878 |
+
# with the options listed above and below.
|
| 879 |
+
#
|
| 880 |
+
# The "dtypes" argument can also accept additional values (see OpDTypes above):
|
| 881 |
+
# OpDTypes.supported - the test is instantiated for all dtypes the operator
|
| 882 |
+
# supports
|
| 883 |
+
# OpDTypes.unsupported - the test is instantiated for all dtypes the operator
|
| 884 |
+
# doesn't support
|
| 885 |
+
# OpDTypes.supported_backward - the test is instantiated for all dtypes the
|
| 886 |
+
# operator's gradient formula supports
|
| 887 |
+
# OpDTypes.unsupported_backward - the test is instantiated for all dtypes the
|
| 888 |
+
# operator's gradient formula doesn't support
|
| 889 |
+
# OpDTypes.any_one - the test is instantiated for one dtype the
|
| 890 |
+
# operator supports. The dtype supports forward and backward if possible.
|
| 891 |
+
# OpDTypes.none - the test is instantiated without any dtype. The test signature
|
| 892 |
+
# should not include a dtype kwarg in this case.
|
| 893 |
+
#
|
| 894 |
+
# These options allow tests to have considerable control over the dtypes
|
| 895 |
+
# they're instantiated for.
|
| 896 |
+
|
| 897 |
+
class ops(_TestParametrizer):
    """Decorator defining the OpInfos a test template is instantiated for.

    The test is instantiated for each op, on each device the op supports, and
    for the dtypes selected by `dtypes` (an OpDTypes category or an explicit
    sequence). `allowed_dtypes` further intersects that selection.
    `skip_if_dynamo` skips the generated tests under Dynamo (not Inductor).
    """

    def __init__(self, op_list, *, dtypes: Union[OpDTypes, Sequence[torch.dtype]] = OpDTypes.supported,
                 allowed_dtypes: Optional[Sequence[torch.dtype]] = None, skip_if_dynamo=True):
        self.op_list = list(op_list)
        self.opinfo_dtypes = dtypes
        self.allowed_dtypes = set(allowed_dtypes) if allowed_dtypes is not None else None
        self.skip_if_dynamo = skip_if_dynamo

    def _parametrize_test(self, test, generic_cls, device_cls):
        """ Parameterizes the given test function across each op and its associated dtypes. """
        if device_cls is None:
            raise RuntimeError('The @ops decorator is only intended to be used in a device-specific '
                               'context; use it with instantiate_device_type_tests() instead of '
                               'instantiate_parametrized_tests()')

        op = check_exhausted_iterator = object()
        for op in self.op_list:
            # Determine the set of dtypes to use.
            dtypes: Union[Set[torch.dtype], Set[None]]
            if isinstance(self.opinfo_dtypes, Sequence):
                dtypes = set(self.opinfo_dtypes)
            elif self.opinfo_dtypes == OpDTypes.unsupported_backward:
                dtypes = set(get_all_dtypes()).difference(op.supported_backward_dtypes(device_cls.device_type))
            elif self.opinfo_dtypes == OpDTypes.supported_backward:
                dtypes = op.supported_backward_dtypes(device_cls.device_type)
            elif self.opinfo_dtypes == OpDTypes.unsupported:
                dtypes = set(get_all_dtypes()).difference(op.supported_dtypes(device_cls.device_type))
            elif self.opinfo_dtypes == OpDTypes.supported:
                dtypes = set(op.supported_dtypes(device_cls.device_type))
            elif self.opinfo_dtypes == OpDTypes.any_one:
                # Tries to pick a dtype that supports both forward or backward
                supported = op.supported_dtypes(device_cls.device_type)
                supported_backward = op.supported_backward_dtypes(device_cls.device_type)
                supported_both = supported.intersection(supported_backward)
                dtype_set = supported_both if len(supported_both) > 0 else supported
                for dtype in ANY_DTYPE_ORDER:
                    if dtype in dtype_set:
                        dtypes = {dtype}
                        break
                else:
                    # BUGFIX: was `{}` (an empty dict, not a set); dict has no
                    # intersection() method, which is called below when
                    # allowed_dtypes is given.
                    dtypes = set()
            elif self.opinfo_dtypes == OpDTypes.any_common_cpu_cuda_one:
                # Tries to pick a dtype that supports both CPU and CUDA
                supported = set(op.dtypes).intersection(op.dtypesIfCUDA)
                if supported:
                    dtypes = {next(dtype for dtype in ANY_DTYPE_ORDER if dtype in supported)}
                else:
                    # BUGFIX: was `{}` (an empty dict); see above.
                    dtypes = set()

            elif self.opinfo_dtypes == OpDTypes.none:
                dtypes = {None}
            else:
                raise RuntimeError(f"Unknown OpDType: {self.opinfo_dtypes}")

            if self.allowed_dtypes is not None:
                dtypes = dtypes.intersection(self.allowed_dtypes)

            # Construct the test name; device / dtype parts are handled outside.
            # See [Note: device and dtype suffix placement]
            test_name = op.formatted_name

            for dtype in dtypes:
                # Construct parameter kwargs to pass to the test.
                param_kwargs = {'op': op}
                _update_param_kwargs(param_kwargs, 'dtype', dtype)

                # NOTE: test_wrapper exists because we don't want to apply
                # op-specific decorators to the original test.
                # Test-specific decorators are applied to the original test,
                # however.
                try:
                    @wraps(test)
                    def test_wrapper(*args, **kwargs):
                        try:
                            return test(*args, **kwargs)
                        except unittest.SkipTest as e:
                            raise e
                        except Exception as e:
                            tracked_input = get_tracked_input()
                            if PRINT_REPRO_ON_FAILURE and tracked_input is not None:
                                raise Exception(  # noqa: TRY002
                                    f"Caused by {tracked_input.type_desc} "
                                    f"at index {tracked_input.index}: "
                                    f"{_serialize_sample(tracked_input.val)}") from e
                            raise e
                        finally:
                            clear_tracked_input()

                    if self.skip_if_dynamo and not TEST_WITH_TORCHINDUCTOR:
                        test_wrapper = skipIfTorchDynamo("Policy: we don't run OpInfo tests w/ Dynamo")(test_wrapper)

                    # Initialize info for the last input seen. This is useful for tracking
                    # down which inputs caused a test failure. Note that TrackedInputIter is
                    # responsible for managing this.
                    test.tracked_input = None

                    decorator_fn = partial(op.get_decorators, generic_cls.__name__,
                                           test.__name__, device_cls.device_type, dtype)

                    yield (test_wrapper, test_name, param_kwargs, decorator_fn)
                except Exception as ex:
                    # Provides an error message for debugging before rethrowing the exception
                    print(f"Failed to instantiate {test_name} for op {op.name}!")
                    raise ex
        if op is check_exhausted_iterator:
            raise ValueError('An empty op_list was passed to @ops. '
                             'Note that this may result from reuse of a generator.')
|
| 1005 |
+
# Decorator that skips a test if the given condition is true.
|
| 1006 |
+
# Notes:
|
| 1007 |
+
# (1) Skip conditions stack.
|
| 1008 |
+
# (2) Skip conditions can be bools or strings. If a string the
|
| 1009 |
+
# test base must have defined the corresponding attribute to be False
|
| 1010 |
+
# for the test to run. If you want to use a string argument you should
|
| 1011 |
+
# probably define a new decorator instead (see below).
|
| 1012 |
+
# (3) Prefer the existing decorators to defining the 'device_type' kwarg.
|
| 1013 |
+
class skipIf:
    """Skip a test when a dependency condition holds.

    `dep` may be a bool, or the name of an attribute on the test instance
    (a truthy or missing attribute triggers the skip). When `device_type`
    is given, the skip only applies on that device type. Skip conditions
    stack across multiple decorators.
    """

    def __init__(self, dep, reason, device_type=None):
        self.dep = dep
        self.reason = reason
        self.device_type = device_type

    def __call__(self, fn):

        @wraps(fn)
        def dep_fn(slf, *args, **kwargs):
            applies = self.device_type is None or self.device_type == slf.device_type
            if applies:
                dep = self.dep
                should_skip = (
                    getattr(slf, dep, True) if isinstance(dep, str)
                    else isinstance(dep, bool) and dep
                )
                if should_skip:
                    raise unittest.SkipTest(self.reason)

            return fn(slf, *args, **kwargs)
        return dep_fn
| 1030 |
+
|
| 1031 |
+
|
| 1032 |
+
# Skips a test on CPU if the condition is true.
|
| 1033 |
+
class skipCPUIf(skipIf):
    """Skip a test on CPU when the condition holds."""

    def __init__(self, dep, reason):
        super().__init__(dep, reason, device_type='cpu')
| 1037 |
+
|
| 1038 |
+
|
| 1039 |
+
# Skips a test on CUDA if the condition is true.
|
| 1040 |
+
class skipCUDAIf(skipIf):
    """Skip a test on CUDA when the condition holds."""

    def __init__(self, dep, reason):
        super().__init__(dep, reason, device_type='cuda')
| 1044 |
+
|
| 1045 |
+
# Skips a test on Lazy if the condition is true.
|
| 1046 |
+
class skipLazyIf(skipIf):
    """Skip a test on the lazy device when the condition holds."""

    def __init__(self, dep, reason):
        super().__init__(dep, reason, device_type='lazy')
| 1050 |
+
|
| 1051 |
+
# Skips a test on Meta if the condition is true.
|
| 1052 |
+
class skipMetaIf(skipIf):
    """Skip a test on the meta device when the condition holds."""

    def __init__(self, dep, reason):
        super().__init__(dep, reason, device_type='meta')
| 1056 |
+
|
| 1057 |
+
# Skips a test on MPS if the condition is true.
|
| 1058 |
+
class skipMPSIf(skipIf):
    """Skip a test on MPS when the condition holds."""

    def __init__(self, dep, reason):
        super().__init__(dep, reason, device_type='mps')
| 1062 |
+
|
| 1063 |
+
# Skips a test on XLA if the condition is true.
|
| 1064 |
+
class skipXLAIf(skipIf):
    """Skip a test on XLA when the condition holds."""

    def __init__(self, dep, reason):
        super().__init__(dep, reason, device_type='xla')
| 1068 |
+
|
| 1069 |
+
class skipPRIVATEUSE1If(skipIf):
    """Skip a test on the registered privateuse1 backend when the condition holds."""

    def __init__(self, dep, reason):
        # The backend name is resolved at decoration time.
        device_type = torch._C._get_privateuse1_backend_name()
        super().__init__(dep, reason, device_type=device_type)
| 1074 |
+
|
| 1075 |
+
def _has_sufficient_memory(device, size):
    """Best-effort check that `device` has at least `size` bytes of free memory.

    Raises unittest.SkipTest when availability cannot be determined
    (xla, unknown device types, or cpu without psutil).
    """
    if torch.device(device).type == 'cuda':
        if not torch.cuda.is_available():
            return False
        gc.collect()
        torch.cuda.empty_cache()
        # torch.cuda.mem_get_info, aka cudaMemGetInfo, returns a tuple of (free memory, total memory) of a GPU
        target = 'cuda:0' if device == 'cuda' else device
        return torch.cuda.memory.mem_get_info(target)[0] >= size

    if device == 'xla':
        raise unittest.SkipTest('TODO: Memory availability checks for XLA?')

    if device != 'cpu':
        raise unittest.SkipTest('Unknown device type')

    # CPU path below.
    if not HAS_PSUTIL:
        raise unittest.SkipTest('Need psutil to determine if memory is sufficient')

    # The sanitizers have significant memory overheads
    effective_size = size * 10 if (TEST_WITH_ASAN or TEST_WITH_TSAN or TEST_WITH_UBSAN) else size

    if psutil.virtual_memory().available < effective_size:
        # Collect garbage and re-check before giving a verdict.
        gc.collect()
    return psutil.virtual_memory().available >= effective_size
| 1105 |
+
|
| 1106 |
+
|
| 1107 |
+
def largeTensorTest(size, device=None):
    """Skip a test when the target device lacks enough free memory.

    `size` may be a byte count, a string of the form "N GB", or a callable
    computing the byte count from the test's arguments. For device-generic
    tests the primary device is checked unless overridden via `device=`;
    other tests must pass `device=` explicitly.
    """
    if isinstance(size, str):
        # Only "<N>GB" / "<N>gb" strings are accepted.
        assert size.endswith(('GB', 'gb')), "only bytes or GB supported"
        size = int(size[:-2]) * 1024 ** 3

    def decorator(fn):
        @wraps(fn)
        def guarded_fn(self, *args, **kwargs):
            required = size(self, *args, **kwargs) if callable(size) else size
            target = self.get_primary_device() if device is None else device
            if not _has_sufficient_memory(target, required):
                raise unittest.SkipTest(f'Insufficient {target} memory')

            return fn(self, *args, **kwargs)
        return guarded_fn
    return decorator
| 1131 |
+
|
| 1132 |
+
|
| 1133 |
+
class expectedFailure:
    """Mark a test as expected to fail on `device_type` (None = every device).

    On the targeted device, an exception from the test counts as success and
    a clean pass triggers a failure; other devices run the test normally.
    """

    def __init__(self, device_type):
        self.device_type = device_type

    def __call__(self, fn):

        @wraps(fn)
        def efail_fn(slf, *args, **kwargs):
            # Some test instances carry a plain `device` string rather than
            # a device_type attribute; fall back to it when present.
            if not hasattr(slf, "device_type") and hasattr(slf, "device") and isinstance(slf.device, str):
                target_device_type = slf.device
            else:
                target_device_type = slf.device_type

            if self.device_type is not None and self.device_type != target_device_type:
                # Not the targeted device: run the test normally.
                return fn(slf, *args, **kwargs)

            try:
                fn(slf, *args, **kwargs)
            except Exception:
                # The expected failure happened.
                return
            slf.fail('expected test to fail, but it passed')

        return efail_fn
| 1157 |
+
|
| 1158 |
+
|
| 1159 |
+
class onlyOn:
    """Restrict a test to a single device type; every other device skips."""

    def __init__(self, device_type):
        self.device_type = device_type

    def __call__(self, fn):

        @wraps(fn)
        def only_fn(slf, *args, **kwargs):
            if slf.device_type != self.device_type:
                raise unittest.SkipTest(f"Only runs on {self.device_type}")

            return fn(slf, *args, **kwargs)

        return only_fn
| 1175 |
+
|
| 1176 |
+
|
| 1177 |
+
# Decorator that provides all available devices of the device type to the test
|
| 1178 |
+
# as a list of strings instead of providing a single device string.
|
| 1179 |
+
# Skips the test if the number of available devices of the variant's device
|
| 1180 |
+
# type is less than the 'num_required_devices' arg.
|
| 1181 |
+
class deviceCountAtLeast:
    """Provide all devices of the device type to the test as a list of strings.

    The decorated test receives a device list instead of a single device
    string and is skipped when fewer than `num_required_devices` devices of
    the variant's device type are available.
    """

    def __init__(self, num_required_devices):
        self.num_required_devices = num_required_devices

    def __call__(self, fn):
        # The instantiation machinery reads this attribute; guard against
        # applying the decorator twice.
        assert not hasattr(fn, 'num_required_devices'), f"deviceCountAtLeast redefinition for {fn.__name__}"
        fn.num_required_devices = self.num_required_devices

        @wraps(fn)
        def multi_fn(slf, devices, *args, **kwargs):
            if len(devices) < self.num_required_devices:
                raise unittest.SkipTest(f"fewer than {self.num_required_devices} devices detected")

            return fn(slf, devices, *args, **kwargs)

        return multi_fn
| 1199 |
+
|
| 1200 |
+
# Only runs the test on the native device type (currently CPU, CUDA, Meta and PRIVATEUSE1)
|
| 1201 |
+
def onlyNativeDeviceTypes(fn):
    """Run the test only on native device types (those listed in NATIVE_DEVICES)."""
    @wraps(fn)
    def only_fn(self, *args, **kwargs):
        if self.device_type not in NATIVE_DEVICES:
            raise unittest.SkipTest(f"onlyNativeDeviceTypes: doesn't run on {self.device_type}")

        return fn(self, *args, **kwargs)

    return only_fn
| 1211 |
+
|
| 1212 |
+
# Specifies per-dtype precision overrides.
|
| 1213 |
+
# Ex.
|
| 1214 |
+
#
|
| 1215 |
+
# @precisionOverride({torch.half : 1e-2, torch.float : 1e-4})
|
| 1216 |
+
# @dtypes(torch.half, torch.float, torch.double)
|
| 1217 |
+
# def test_X(self, device, dtype):
|
| 1218 |
+
# ...
|
| 1219 |
+
#
|
| 1220 |
+
# When the test is instantiated its class's precision will be set to the
|
| 1221 |
+
# corresponding override, if it exists.
|
| 1222 |
+
# self.precision can be accessed directly, and it also controls the behavior of
|
| 1223 |
+
# functions like self.assertEqual().
|
| 1224 |
+
#
|
| 1225 |
+
# Note that self.precision is a scalar value, so if you require multiple
|
| 1226 |
+
# precisions (or are working with multiple dtypes) they should be specified
|
| 1227 |
+
# explicitly and computed using self.precision (e.g.
|
| 1228 |
+
# self.precision *2, max(1, self.precision)).
|
| 1229 |
+
class precisionOverride:
    """Decorator class specifying per-dtype precision overrides.

    Example::

        @precisionOverride({torch.half: 1e-2, torch.float: 1e-4})
        @dtypes(torch.half, torch.float, torch.double)
        def test_X(self, device, dtype): ...

    When the test is instantiated its class's precision is set to the
    corresponding override, if one exists.  ``self.precision`` can be
    accessed directly and also controls functions like
    ``self.assertEqual()``.  Note that ``self.precision`` is a scalar.
    """

    def __init__(self, d):
        assert isinstance(d, dict), "precisionOverride not given a dtype : precision dict!"
        for dtype in d:
            assert isinstance(dtype, torch.dtype), f"precisionOverride given unknown dtype {dtype}"
        self.d = d

    def __call__(self, fn):
        # Stash the mapping on the test function; the instantiation
        # machinery reads it back later.
        fn.precision_overrides = self.d
        return fn
|
| 1241 |
+
|
| 1242 |
+
# Specifies per-dtype tolerance overrides tol(atol, rtol). It has priority over
|
| 1243 |
+
# precisionOverride.
|
| 1244 |
+
# Ex.
|
| 1245 |
+
#
|
| 1246 |
+
# @toleranceOverride({torch.float : tol(atol=1e-2, rtol=1e-3},
|
| 1247 |
+
# torch.double : tol{atol=1e-4, rtol = 0})
|
| 1248 |
+
# @dtypes(torch.half, torch.float, torch.double)
|
| 1249 |
+
# def test_X(self, device, dtype):
|
| 1250 |
+
# ...
|
| 1251 |
+
#
|
| 1252 |
+
# When the test is instantiated its class's tolerance will be set to the
|
| 1253 |
+
# corresponding override, if it exists.
|
| 1254 |
+
# self.rtol and self.precision can be accessed directly, and they also control
|
| 1255 |
+
# the behavior of functions like self.assertEqual().
|
| 1256 |
+
#
|
| 1257 |
+
# The above example sets atol = 1e-2 and rtol = 1e-3 for torch.float and
|
| 1258 |
+
# atol = 1e-4 and rtol = 0 for torch.double.
|
| 1259 |
+
# Named tuple pairing an absolute tolerance with a relative tolerance;
# used as the values of a toleranceOverride mapping.
tol = namedtuple('tol', ('atol', 'rtol'))
|
| 1260 |
+
|
| 1261 |
+
class toleranceOverride:
    """Decorator class specifying per-dtype tolerance overrides
    ``tol(atol, rtol)``; has priority over ``precisionOverride``.

    Example::

        @toleranceOverride({torch.float: tol(atol=1e-2, rtol=1e-3)})
        def test_X(self, device, dtype): ...
    """

    def __init__(self, d):
        assert isinstance(d, dict), "toleranceOverride not given a dtype : tol dict!"
        for dtype, prec in d.items():
            assert isinstance(dtype, torch.dtype), f"toleranceOverride given unknown dtype {dtype}"
            assert isinstance(prec, tol), "toleranceOverride not given a dtype : tol dict!"
        self.d = d

    def __call__(self, fn):
        # Record the overrides on the test function for the
        # instantiation machinery to pick up.
        fn.tolerance_overrides = self.d
        return fn
|
| 1273 |
+
|
| 1274 |
+
# Decorator that instantiates a variant of the test for each given dtype.
|
| 1275 |
+
# Notes:
|
| 1276 |
+
# (1) Tests that accept the dtype argument MUST use this decorator.
|
| 1277 |
+
# (2) Can be overridden for CPU or CUDA, respectively, using dtypesIfCPU
|
| 1278 |
+
# or dtypesIfCUDA.
|
| 1279 |
+
# (3) Can accept an iterable of dtypes or an iterable of tuples
|
| 1280 |
+
# of dtypes.
|
| 1281 |
+
# Examples:
|
| 1282 |
+
# @dtypes(torch.float32, torch.float64)
|
| 1283 |
+
# @dtypes((torch.long, torch.float32), (torch.int, torch.float64))
|
| 1284 |
+
class dtypes:
    """Decorator class instantiating a variant of the test for each given dtype.

    Notes:
      (1) Tests that accept the dtype argument MUST use this decorator.
      (2) Can be overridden per device type via dtypesIfCPU / dtypesIfCUDA.
      (3) Accepts either an iterable of dtypes or an iterable of dtype tuples.

    Examples::

        @dtypes(torch.float32, torch.float64)
        @dtypes((torch.long, torch.float32), (torch.int, torch.float64))
    """

    def __init__(self, *args, device_type="all"):
        if args and isinstance(args[0], (list, tuple)):
            # Tuple-of-dtypes form: every variant must be a list/tuple of dtypes.
            for arg in args:
                assert isinstance(arg, (list, tuple)), \
                    "When one dtype variant is a tuple or list, " \
                    "all dtype variants must be. " \
                    f"Received non-list non-tuple dtype {str(arg)}"
                assert all(isinstance(dtype, torch.dtype) for dtype in arg), f"Unknown dtype in {str(arg)}"
        else:
            # Flat form: every variant is a single dtype.
            assert all(isinstance(arg, torch.dtype) for arg in args), f"Unknown dtype in {str(args)}"

        self.args = args
        self.device_type = device_type

    def __call__(self, fn):
        # Accumulate per-device-type dtype lists on the test function,
        # refusing to silently overwrite an earlier declaration.
        d = getattr(fn, 'dtypes', {})
        assert self.device_type not in d, f"dtypes redefinition for {self.device_type}"
        d[self.device_type] = self.args
        fn.dtypes = d
        return fn
|
| 1306 |
+
|
| 1307 |
+
|
| 1308 |
+
# Overrides specified dtypes on the CPU.
|
| 1309 |
+
class dtypesIfCPU(dtypes):
|
| 1310 |
+
|
| 1311 |
+
def __init__(self, *args):
|
| 1312 |
+
super().__init__(*args, device_type='cpu')
|
| 1313 |
+
|
| 1314 |
+
|
| 1315 |
+
# Overrides specified dtypes on CUDA.
|
| 1316 |
+
class dtypesIfCUDA(dtypes):
    """dtypes variant that overrides the specified dtypes on CUDA only."""

    def __init__(self, *args):
        super().__init__(*args, device_type='cuda')
|
| 1320 |
+
|
| 1321 |
+
class dtypesIfMPS(dtypes):
    """dtypes variant that overrides the specified dtypes on MPS only."""

    def __init__(self, *args):
        super().__init__(*args, device_type='mps')
|
| 1325 |
+
|
| 1326 |
+
class dtypesIfPRIVATEUSE1(dtypes):
    """dtypes variant that overrides the specified dtypes on the
    privateuse1 backend only (backend name queried from torch._C)."""

    def __init__(self, *args):
        super().__init__(*args, device_type=torch._C._get_privateuse1_backend_name())
|
| 1330 |
+
|
| 1331 |
+
def onlyCPU(fn):
    """Runs the test only on CPU."""
    return onlyOn('cpu')(fn)


def onlyCUDA(fn):
    """Runs the test only on CUDA."""
    return onlyOn('cuda')(fn)


def onlyMPS(fn):
    """Runs the test only on MPS."""
    return onlyOn('mps')(fn)


def onlyXPU(fn):
    """Runs the test only on XPU."""
    return onlyOn('xpu')(fn)
|
| 1345 |
+
|
| 1346 |
+
def onlyPRIVATEUSE1(fn):
    """Runs the test only on the privateuse1 backend; the test is skipped
    outright when torch exposes no module for that backend."""
    backend = torch._C._get_privateuse1_backend_name()
    if getattr(torch, backend, None) is None:
        # No torch.<backend> module registered: skip unconditionally.
        reason = f"Skip as torch has no module of {backend}"
        return unittest.skip(reason)(fn)
    return onlyOn(backend)(fn)
|
| 1353 |
+
|
| 1354 |
+
def onlyCUDAAndPRIVATEUSE1(fn):
|
| 1355 |
+
@wraps(fn)
|
| 1356 |
+
def only_fn(self, *args, **kwargs):
|
| 1357 |
+
if self.device_type not in ('cuda', torch._C._get_privateuse1_backend_name()):
|
| 1358 |
+
reason = f"onlyCUDAAndPRIVATEUSE1: doesn't run on {self.device_type}"
|
| 1359 |
+
raise unittest.SkipTest(reason)
|
| 1360 |
+
|
| 1361 |
+
return fn(self, *args, **kwargs)
|
| 1362 |
+
|
| 1363 |
+
return only_fn
|
| 1364 |
+
|
| 1365 |
+
def disablecuDNN(fn):
    """Runs the test with cuDNN disabled when on CUDA with cuDNN present;
    otherwise runs it unchanged."""

    @wraps(fn)
    def disable_cudnn(self, *args, **kwargs):
        if self.device_type == 'cuda' and self.has_cudnn():
            with torch.backends.cudnn.flags(enabled=False):
                return fn(self, *args, **kwargs)
        return fn(self, *args, **kwargs)

    return disable_cudnn
|
| 1375 |
+
|
| 1376 |
+
def disableMkldnn(fn):
    """Runs the test with mkldnn disabled when mkldnn is available;
    otherwise runs it unchanged."""

    @wraps(fn)
    def disable_mkldnn(self, *args, **kwargs):
        if torch.backends.mkldnn.is_available():
            with torch.backends.mkldnn.flags(enabled=False):
                return fn(self, *args, **kwargs)
        return fn(self, *args, **kwargs)

    return disable_mkldnn
|
| 1386 |
+
|
| 1387 |
+
|
| 1388 |
+
def expectedFailureCPU(fn):
    """Marks the test as expected to fail on CPU."""
    return expectedFailure('cpu')(fn)


def expectedFailureCUDA(fn):
    """Marks the test as expected to fail on CUDA."""
    return expectedFailure('cuda')(fn)


def expectedFailureXPU(fn):
    """Marks the test as expected to fail on XPU."""
    return expectedFailure('xpu')(fn)


def expectedFailureMeta(fn):
    """Marks the test as expected to fail on meta, and skips it under
    TorchDynamo."""
    return skipIfTorchDynamo()(expectedFailure('meta')(fn))


def expectedFailureXLA(fn):
    """Marks the test as expected to fail on XLA."""
    return expectedFailure('xla')(fn)
|
| 1403 |
+
|
| 1404 |
+
def skipCPUIfNoLapack(fn):
    """Skips the test on CPU if LAPACK is not available."""
    return skipCPUIf(not torch._C.has_lapack, "PyTorch compiled without Lapack")(fn)


def skipCPUIfNoFFT(fn):
    """Skips the test on CPU if FFT is not available."""
    return skipCPUIf(not torch._C.has_spectral, "PyTorch is built without FFT support")(fn)


def skipCPUIfNoMkl(fn):
    """Skips the test on CPU if MKL is not available."""
    return skipCPUIf(not TEST_MKL, "PyTorch is built without MKL support")(fn)


def skipCPUIfNoMklSparse(fn):
    """Skips the test on CPU if MKL Sparse is not available
    (it's not linked on Windows)."""
    return skipCPUIf(IS_WINDOWS or not TEST_MKL, "PyTorch is built without MKL support")(fn)


def skipCPUIfNoMkldnn(fn):
    """Skips the test on CPU if mkldnn is not available."""
    return skipCPUIf(not torch.backends.mkldnn.is_available(), "PyTorch is built without mkldnn support")(fn)
|
| 1427 |
+
|
| 1428 |
+
|
| 1429 |
+
def skipCUDAIfNoMagma(fn):
    """Skips the test on CUDA if MAGMA is not available; also forces the
    default-stream guard."""
    return skipCUDAIf('no_magma', "no MAGMA library detected")(skipCUDANonDefaultStreamIf(True)(fn))


def has_cusolver():
    """cuSOLVER is present on every non-ROCm build."""
    return not TEST_WITH_ROCM


def has_hipsolver():
    """hipSOLVER is available only on ROCm >= 5.3."""
    rocm_version = _get_torch_rocm_version()
    return rocm_version >= (5, 3)
|
| 1440 |
+
|
| 1441 |
+
def skipCUDAIfNoCusolver(fn):
    """Skips the test on CUDA/ROCm if neither cuSOLVER nor hipSOLVER is
    available."""
    return skipCUDAIf(not has_cusolver() and not has_hipsolver(), "cuSOLVER not available")(fn)


def skipCUDAIfNoMagmaAndNoCusolver(fn):
    """Skips the test when both cuSOLVER and MAGMA are unavailable."""
    if has_cusolver():
        return fn
    # cuSolver is disabled on cuda < 10.1.243, tests depend on MAGMA
    return skipCUDAIfNoMagma(fn)


def skipCUDAIfNoMagmaAndNoLinalgsolver(fn):
    """Skips the test when cuSOLVER/hipSOLVER and MAGMA are all
    unavailable."""
    if has_cusolver() or has_hipsolver():
        return fn
    # cuSolver is disabled on cuda < 10.1.243, tests depend on MAGMA
    return skipCUDAIfNoMagma(fn)
|
| 1461 |
+
|
| 1462 |
+
def skipCUDAIfRocm(func=None, *, msg="test doesn't currently work on the ROCm stack"):
    """Skips the test on CUDA when running under ROCm.

    Usable both bare (``@skipCUDAIfRocm``) and with a custom message
    (``@skipCUDAIfRocm(msg=...)``).
    """
    def dec_fn(fn):
        return skipCUDAIf(TEST_WITH_ROCM, reason=f"skipCUDAIfRocm: {msg}")(fn)

    # Bare-decorator form: apply immediately.
    if func:
        return dec_fn(func)
    return dec_fn


def skipCUDAIfNotRocm(fn):
    """Skips the test on CUDA when NOT running under ROCm."""
    return skipCUDAIf(not TEST_WITH_ROCM, "test doesn't currently work on the CUDA stack")(fn)
|
| 1474 |
+
|
| 1475 |
+
def skipCUDAIfRocmVersionLessThan(version=None):
    """Skips the test on CUDA when ROCm is unavailable or its version is
    lower than the requested (major, minor[, patch]) tuple."""

    def dec_fn(fn):
        @wraps(fn)
        def wrap_fn(self, *args, **kwargs):
            if self.device_type == 'cuda':
                if not TEST_WITH_ROCM:
                    raise unittest.SkipTest("ROCm not available")
                rocm_version_tuple = _get_torch_rocm_version()
                # NOTE: version=None also skips here (no requirement to compare against).
                if rocm_version_tuple is None or version is None or rocm_version_tuple < tuple(version):
                    reason = f"ROCm {rocm_version_tuple} is available but {version} required"
                    raise unittest.SkipTest(reason)
            return fn(self, *args, **kwargs)

        return wrap_fn
    return dec_fn


def skipCUDAIfNotMiopenSuggestNHWC(fn):
    """Skips the test when MIOpen NHWC activation is not enabled."""
    return skipCUDAIf(not TEST_WITH_MIOPEN_SUGGEST_NHWC, "test doesn't currently work without MIOpen NHWC activation")(fn)
|
| 1498 |
+
|
| 1499 |
+
def skipCUDAVersionIn(versions : List[Tuple[int, int]] = None):
    """Skips the test for the listed CUDA versions, each a (major, minor)
    pair."""

    def dec_fn(fn):
        @wraps(fn)
        def wrap_fn(self, *args, **kwargs):
            version = _get_torch_cuda_version()
            # (0, 0) is reported on cpu or rocm builds: nothing to skip.
            if version != (0, 0) and version in (versions or []):
                raise unittest.SkipTest(f"test skipped for CUDA version {version}")
            return fn(self, *args, **kwargs)

        return wrap_fn
    return dec_fn
|
| 1514 |
+
|
| 1515 |
+
def skipCUDAIfVersionLessThan(versions : Tuple[int, int] = None):
    """Skips the test for CUDA versions less than the specified
    (major, minor) pair.

    Args:
        versions: minimum required CUDA version as a (major, minor) tuple.

    Raises:
        unittest.SkipTest: (from the wrapper) when the detected CUDA
            version is older than ``versions``.
    """
    def dec_fn(fn):
        @wraps(fn)
        def wrap_fn(self, *args, **kwargs):
            version = _get_torch_cuda_version()
            if version == (0, 0):  # cpu or rocm
                return fn(self, *args, **kwargs)
            if version < versions:
                # BUGFIX: the reason previously interpolated the *detected*
                # version, yielding e.g. "skipped for CUDA versions < (11, 8)"
                # when 11.8 was the installed version. Name the required
                # minimum instead.
                reason = f"test skipped for CUDA versions < {versions}"
                raise unittest.SkipTest(reason)
            return fn(self, *args, **kwargs)

        return wrap_fn
    return dec_fn
|
| 1530 |
+
|
| 1531 |
+
def skipCUDAIfCudnnVersionLessThan(version=0):
    """Skips the test on CUDA when cuDNN is unavailable or older than
    the requested version (an int, e.g. 8600)."""

    def dec_fn(fn):
        @wraps(fn)
        def wrap_fn(self, *args, **kwargs):
            if self.device_type == 'cuda':
                if self.no_cudnn:
                    raise unittest.SkipTest("cuDNN not available")
                if self.cudnn_version is None or self.cudnn_version < version:
                    raise unittest.SkipTest(
                        f"cuDNN version {self.cudnn_version} is available but {version} required")
            return fn(self, *args, **kwargs)

        return wrap_fn
    return dec_fn
|
| 1549 |
+
|
| 1550 |
+
def skipCUDAIfNoCusparseGeneric(fn):
    """Skips the test on CUDA if the cuSparse generic API is not available."""
    return skipCUDAIf(not TEST_CUSPARSE_GENERIC, "cuSparse Generic API not available")(fn)


def skipCUDAIfNoHipsparseGeneric(fn):
    """Skips the test on ROCm if the hipSparse generic API is not available."""
    return skipCUDAIf(not TEST_HIPSPARSE_GENERIC, "hipSparse Generic API not available")(fn)


def skipCUDAIfNoSparseGeneric(fn):
    """Skips the test when neither sparse generic API is available."""
    return skipCUDAIf(not (TEST_CUSPARSE_GENERIC or TEST_HIPSPARSE_GENERIC), "Sparse Generic API not available")(fn)
|
| 1559 |
+
|
| 1560 |
+
def skipCUDAIfNoCudnn(fn):
    """Skips the test on CUDA if cuDNN is unavailable (any version)."""
    return skipCUDAIfCudnnVersionLessThan(0)(fn)


def skipCUDAIfMiopen(fn):
    """Skips the test when running on the MIOpen (HIP) backend."""
    return skipCUDAIf(torch.version.hip is not None, "Marked as skipped for MIOpen")(fn)


def skipCUDAIfNoMiopen(fn):
    """Skips the test when MIOpen (HIP) is not the backend; also requires cuDNN."""
    return skipCUDAIf(torch.version.hip is None, "MIOpen is not available")(skipCUDAIfNoCudnn(fn))
|
| 1568 |
+
|
| 1569 |
+
def skipLazy(fn):
    """Skips the test on the lazy-tensor backend."""
    return skipLazyIf(True, "test doesn't work with lazy tensors")(fn)


def skipMeta(fn):
    """Skips the test on the meta backend."""
    return skipMetaIf(True, "test doesn't work with meta tensors")(fn)


def skipXLA(fn):
    """Skips the test on the XLA backend."""
    return skipXLAIf(True, "Marked as skipped for XLA")(fn)


def skipMPS(fn):
    """Skips the test on the MPS backend."""
    return skipMPSIf(True, "test doesn't work on MPS backend")(fn)


def skipPRIVATEUSE1(fn):
    """Skips the test on the privateuse1 backend."""
    return skipPRIVATEUSE1If(True, "test doesn't work on privateuse1 backend")(fn)
|
| 1583 |
+
|
| 1584 |
+
# TODO: the "all" in the name isn't true anymore for quite some time as we have also have for example XLA and MPS now.
# This should probably enumerate all available device type test base classes.
def get_all_device_types() -> List[str]:
    """Returns the device types to test on: always 'cpu', plus 'cuda'
    when CUDA is available."""
    devices = ['cpu']
    if torch.cuda.is_available():
        devices.append('cuda')
    return devices
|
valley/lib/python3.10/site-packages/torch/testing/_internal/common_dist_composable.py
ADDED
|
@@ -0,0 +1,111 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# mypy: ignore-errors
|
| 2 |
+
|
| 3 |
+
# Owner(s): ["oncall: distributed"]
|
| 4 |
+
|
| 5 |
+
from typing import Tuple
|
| 6 |
+
|
| 7 |
+
import torch
|
| 8 |
+
import torch.nn as nn
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class UnitModule(nn.Module):
    """Small 100-wide MLP unit: Linear -> (ReLU, Linear, ReLU) -> Linear."""

    def __init__(self, device: torch.device):
        super().__init__()
        self.l1 = nn.Linear(100, 100, device=device)
        self.seq = nn.Sequential(
            nn.ReLU(),
            nn.Linear(100, 100, device=device),
            nn.ReLU(),
        )
        self.l2 = nn.Linear(100, 100, device=device)

    def forward(self, x):
        h = self.l1(x)
        h = self.seq(h)
        return self.l2(h)
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
class CompositeModel(nn.Module):
    """Composition of two UnitModules sandwiched between two Linears."""

    def __init__(self, device: torch.device):
        super().__init__()
        self.l1 = nn.Linear(100, 100, device=device)
        self.u1 = UnitModule(device)
        self.u2 = UnitModule(device)
        self.l2 = nn.Linear(100, 100, device=device)

    def forward(self, x):
        h = self.l1(x)
        h = self.u1(h)
        h = self.u2(h)
        return self.l2(h)
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
class UnitParamModule(nn.Module):
    """Like UnitModule but ends with a matmul against a raw nn.Parameter."""

    def __init__(self, device: torch.device):
        super().__init__()
        self.l = nn.Linear(100, 100, device=device)
        self.seq = nn.Sequential(
            nn.ReLU(),
            nn.Linear(100, 100, device=device),
            nn.ReLU(),
        )
        self.p = nn.Parameter(torch.randn((100, 100), device=device))

    def forward(self, x):
        h = self.seq(self.l(x))
        return torch.mm(h, self.p)
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
class CompositeParamModel(nn.Module):
    """Two UnitModules after a Linear, followed by a matmul against a raw
    nn.Parameter; also registers a persistent (unused-in-forward) buffer."""

    def __init__(self, device: torch.device):
        super().__init__()
        self.l = nn.Linear(100, 100, device=device)
        self.u1 = UnitModule(device)
        self.u2 = UnitModule(device)
        self.p = nn.Parameter(torch.randn((100, 100), device=device))
        self.register_buffer(
            "buffer", torch.randn((100, 100), device=device), persistent=True
        )

    def forward(self, x):
        a = self.u2(self.u1(self.l(x)))
        b = self.p
        return torch.mm(a, b)
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
class FakeSequential(nn.Module):
    # Sequential-like container used to achieve a desired nested wrapping
    # with the module wrap policy for `nn.Sequential`. The modules are
    # kept in a plain Python list attribute, so (unlike nn.Sequential)
    # they are not registered through Module attribute assignment.
    def __init__(self, *modules: Tuple[nn.Module, ...]) -> None:
        super().__init__()
        self._module_sequence = list(modules)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        out = x
        for layer in self._module_sequence:
            out = layer(out)
        return out
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
class NestedSequentialModel(nn.Module):
    """Model mixing nn.Sequential and FakeSequential at several nesting
    depths.

    The nested structure exercises traversal order to catch differences
    between valid traversals (e.g. BFS and DFS variations).
    """

    def __init__(self, device: torch.device) -> None:
        super().__init__()
        # NOTE: attribute assignment order fixes parameter/state-dict
        # ordering, so it is kept exactly as laid out here.
        self.seq1 = nn.Sequential(
            nn.Linear(1, 1, device=device),
            FakeSequential(
                nn.Linear(1, 1, device=device),
                nn.ReLU(),
                FakeSequential(
                    nn.Linear(1, 1, device=device),
                ),
                nn.ReLU(),
            ),
            nn.Linear(1, 2, device=device),
        )
        self.lin = nn.Linear(2, 2, device=device)
        self.seq2 = nn.Sequential(
            nn.ReLU(),
            nn.Linear(2, 3, device=device),
            FakeSequential(
                nn.Linear(3, 2, bias=False, device=device),
                nn.Linear(2, 4, bias=False, device=device),
            ),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        h = self.seq1(x)
        h = self.lin(h)
        return self.seq2(h)
|