diff --git a/.gitattributes b/.gitattributes index 6fa66b954cd51a66b7d16012461ad4807462194a..f50ba686ad3738ed5494cf873b60aa0eed1be8ed 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1532,3 +1532,5 @@ janus/lib/libasan.so.6.0.0 filter=lfs diff=lfs merge=lfs -text janus/lib/libssl.so filter=lfs diff=lfs merge=lfs -text janus/lib/libgcc_s.so.1 filter=lfs diff=lfs merge=lfs -text infer_4_37_2/lib/python3.10/site-packages/decord/libdecord.so filter=lfs diff=lfs merge=lfs -text +infer_4_47_1/lib/python3.10/site-packages/torch/_refs/__pycache__/__init__.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +infer_4_47_1/lib/python3.10/site-packages/msgpack/_cmsgpack.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text diff --git a/infer_4_47_1/lib/python3.10/site-packages/msgpack/_cmsgpack.cpython-310-x86_64-linux-gnu.so b/infer_4_47_1/lib/python3.10/site-packages/msgpack/_cmsgpack.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..4bf47c90bf07259a20ec910724e4653a76c05300 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/msgpack/_cmsgpack.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2f87ce97d46442abda5490c80742c2ce65e1c06782a94d0a03f60c4e678c5649 +size 1188120 diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/_refs/__pycache__/__init__.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/torch/_refs/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..810b33a1e06991a265dfa1103c551a7bf016f804 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/_refs/__pycache__/__init__.cpython-310.pyc @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c1f07687d3874ad47493ac795f6d7e7a86815e211a1892f75efdf76ac80b2b55 +size 142787 diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/contrib/__pycache__/_tensorboard_vis.cpython-310.pyc 
b/infer_4_47_1/lib/python3.10/site-packages/torch/contrib/__pycache__/_tensorboard_vis.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5d203d225899b130e7b70c129b32bfbe5630dd2c Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/torch/contrib/__pycache__/_tensorboard_vis.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/contrib/_tensorboard_vis.py b/infer_4_47_1/lib/python3.10/site-packages/torch/contrib/_tensorboard_vis.py new file mode 100644 index 0000000000000000000000000000000000000000..ed1445dd7bce648bc4ac80a2782d72cf0faba2e0 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/contrib/_tensorboard_vis.py @@ -0,0 +1,143 @@ +# mypy: allow-untyped-defs +import time +from collections import defaultdict +from functools import partial +from typing import DefaultDict + +import torch + + +# Unfortunately it doesn't seem as if there was any way to get TensorBoard to do +# anything without having TF installed, and so this file has a hard dependency on it +# as well. It really is a debugging tool, so it doesn't matter. 
+try: + from tensorflow.core.util import event_pb2 + from tensorflow.core.framework import graph_pb2 + from tensorflow.python.summary.writer.writer import FileWriter +except ImportError: + raise ImportError("TensorBoard visualization of GraphExecutors requires having " + "TensorFlow installed") from None + + +def dump_tensorboard_summary(graph_executor, logdir): + with FileWriter(logdir) as w: + pb_graph = visualize(graph_executor) + evt = event_pb2.Event(wall_time=time.time(), graph_def=pb_graph.SerializeToString()) + w.add_event(evt) + + +def visualize(graph, name_prefix='', pb_graph=None, executors_it=None): + """Visualizes an independent graph, or a graph executor.""" + value_map = {} + pb_graph = pb_graph or graph_pb2.GraphDef() + + if isinstance(graph, torch._C.GraphExecutorState): + visualize_graph_executor(graph, name_prefix, pb_graph, + partial(visualize, pb_graph=pb_graph)) + return pb_graph + + # Set up an input node + input_node = pb_graph.node.add(op='input', name=name_prefix + 'input') + for i, value in enumerate(graph.param_node().outputs()): + value_map[value.unique()] = name_prefix + 'input:' + str(i) + + visualize_rec(graph, value_map, name_prefix, pb_graph, executors_it) + + # Gather all outputs + return_node = pb_graph.node.add(op='output', name=name_prefix + 'output') + for value in graph.return_node().inputs(): + return_node.input.append(value_map[value.unique()]) + + return pb_graph + + +def visualize_graph_executor(state, name_prefix, pb_graph, inline_graph): + """Append the state of a given GraphExecutor to the graph protobuf. + + Args: + state (GraphExecutor or GraphExecutorState): GraphExecutor to display. + name_prefix (str): Name prefix of the containing subgraph. + pb_graph (GraphDef): graph to append to. + inline_graph (Callable): a function that handles setting up a value_map, + so that some graphs in here can be inlined. 
This is necessary, because + this will simply be `visualize` for the top-level GraphExecutor, + or `inline_graph` for all nested ones. + + The signature should look like (Graph, name_prefix) -> (). + It will be called exactly once. + + The strategy is to embed all different configurations as independent subgraphs, + while inlining the original graph as the one that actually produces the values. + """ + if state.autograd_fallback_graph is not None: + visualize(graph=state.autograd_fallback_graph, + name_prefix=name_prefix + 'autograd_fallback/', + pb_graph=pb_graph, + executors_it=iter(state.autograd_fallback.executors())) + + for i, (arg_spec, plan) in enumerate(state.execution_plans.items()): + subgraph_name = name_prefix + f'plan{i}/' + + # Create a disconnected node that will keep information regarding the input + # types of this trace. This is unfortunately a bit too verbose to be included + # in the subgraph name. + input_kinds = pb_graph.node.add(op='INPUT_KIND', name=subgraph_name) + input_kinds.attr['inputs'].s = repr(arg_spec).encode('ascii') + + visualize(plan.graph, subgraph_name, pb_graph, iter(plan.code.executors())) + + # Show gradient as an independent subgraph of this plan + if plan.grad_executor is not None: + grad_subgraph_name = subgraph_name + 'grad/' + visualize(plan.grad_executor, grad_subgraph_name, pb_graph) + + return inline_graph(state.graph, name_prefix + 'original/') + + +def visualize_rec(graph, value_map, name_prefix, pb_graph, executors_it=None): + """Recursive part of visualize (basically skips setting up the input and output nodes).""" + def inline_graph(subgraph, name, node): + rec_value_map = {inp.unique(): value_map[val.unique()] + for inp, val in zip(subgraph.inputs(), node.inputs())} + visualize_rec(graph=subgraph, + value_map=rec_value_map, + name_prefix=name, + pb_graph=pb_graph) + for out, val in zip(subgraph.outputs(), node.outputs()): + value_map[val.unique()] = rec_value_map[out.unique()] + + op_id_counter: 
DefaultDict[str, int] = defaultdict(int) + + def name_for(node): + kind = node.kind()[node.kind().index('::') + 2:] + op_id_counter[kind] += 1 + return kind, name_prefix + kind + '_' + str(op_id_counter[kind]) + + def add_fusion_group(node): + op, name = name_for(node) + inline_graph(node.g('Subgraph'), name + '/', node) + + def add_graph_executor(node): + op, name = name_for(node) + if executors_it is None: + add_node(node) + else: + ge = next(executors_it) + visualize_graph_executor(ge, name + '/', pb_graph, + partial(inline_graph, node=node)) + + def add_node(node): + if node.kind() == 'prim::FusionGroup': + return add_fusion_group(node) + elif node.kind() == 'prim::GraphExecutor': + return add_graph_executor(node) + op, name = name_for(node) + pb_node = pb_graph.node.add(op=op, name=name) + for value in node.inputs(): + pb_node.input.append(value_map[value.unique()]) + # TODO: handle attrs + for i, value in enumerate(node.outputs()): + value_map[value.unique()] = name + ':' + str(i) + + for node in graph.nodes(): + add_node(node) diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/quantization/__init__.py b/infer_4_47_1/lib/python3.10/site-packages/torch/quantization/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8789fea17a17ffa8e490a8d744892c5140a70ee2 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/quantization/__init__.py @@ -0,0 +1,86 @@ +# mypy: allow-untyped-defs +from .fake_quantize import * # noqa: F403 +from .fuse_modules import fuse_modules +from .fuser_method_mappings import * # noqa: F403 +from .observer import * # noqa: F403 +from .qconfig import * # noqa: F403 +from .quant_type import * # noqa: F403 +from .quantization_mappings import * # noqa: F403 +from .quantize import * # noqa: F403 +from .quantize_jit import * # noqa: F403 +from .stubs import * # noqa: F403 + + +def default_eval_fn(model, calib_data): + r""" + Default evaluation function takes a torch.utils.data.Dataset or a list 
of + input Tensors and run the model on the dataset + """ + for data, target in calib_data: + model(data) + + +__all__ = [ + "QuantWrapper", + "QuantStub", + "DeQuantStub", + # Top level API for eager mode quantization + "quantize", + "quantize_dynamic", + "quantize_qat", + "prepare", + "convert", + "prepare_qat", + # Top level API for graph mode quantization on TorchScript + "quantize_jit", + "quantize_dynamic_jit", + "_prepare_ondevice_dynamic_jit", + "_convert_ondevice_dynamic_jit", + "_quantize_ondevice_dynamic_jit", + # Top level API for graph mode quantization on GraphModule(torch.fx) + # 'fuse_fx', 'quantize_fx', # TODO: add quantize_dynamic_fx + # 'prepare_fx', 'prepare_dynamic_fx', 'convert_fx', + "QuantType", # quantization type + # custom module APIs + "get_default_static_quant_module_mappings", + "get_static_quant_module_class", + "get_default_dynamic_quant_module_mappings", + "get_default_qat_module_mappings", + "get_default_qconfig_propagation_list", + "get_default_compare_output_module_list", + "get_quantized_operator", + "get_fuser_method", + # Sub functions for `prepare` and `swap_module` + "propagate_qconfig_", + "add_quant_dequant", + "swap_module", + "default_eval_fn", + # Observers + "ObserverBase", + "WeightObserver", + "HistogramObserver", + "observer", + "default_observer", + "default_weight_observer", + "default_placeholder_observer", + "default_per_channel_weight_observer", + # FakeQuantize (for qat) + "default_fake_quant", + "default_weight_fake_quant", + "default_fixed_qparams_range_neg1to1_fake_quant", + "default_fixed_qparams_range_0to1_fake_quant", + "default_per_channel_weight_fake_quant", + "default_histogram_fake_quant", + # QConfig + "QConfig", + "default_qconfig", + "default_dynamic_qconfig", + "float16_dynamic_qconfig", + "float_qparams_weight_only_qconfig", + # QAT utilities + "default_qat_qconfig", + "prepare_qat", + "quantize_qat", + # module transformations + "fuse_modules", +] diff --git 
a/infer_4_47_1/lib/python3.10/site-packages/torch/quantization/__pycache__/fuse_modules.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/torch/quantization/__pycache__/fuse_modules.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0b0b9bf7f97066502444a53f14ec41aab68c3467 Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/torch/quantization/__pycache__/fuse_modules.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/quantization/_numeric_suite.py b/infer_4_47_1/lib/python3.10/site-packages/torch/quantization/_numeric_suite.py new file mode 100644 index 0000000000000000000000000000000000000000..49ccc8e69523f7dbee2335b788a2cb3a7db618a2 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/quantization/_numeric_suite.py @@ -0,0 +1,28 @@ +# flake8: noqa: F401 +r""" +This file is in the process of migration to `torch/ao/quantization`, and +is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +`torch/ao/ns/_numeric_suite.py`, while adding an import statement +here. 
+""" + +from torch.ao.ns._numeric_suite import ( + _convert_tuple_to_list, + _dequantize_tensor_list, + _find_match, + _get_logger_dict_helper, + _is_identical_module_type, + compare_model_outputs, + compare_model_stub, + compare_weights, + get_logger_dict, + get_matching_activations, + Logger, + NON_LEAF_MODULE_TO_ADD_OBSERVER_ALLOW_LIST, + OutputLogger, + prepare_model_outputs, + prepare_model_with_stubs, + Shadow, + ShadowLogger, +) diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/quantization/_numeric_suite_fx.py b/infer_4_47_1/lib/python3.10/site-packages/torch/quantization/_numeric_suite_fx.py new file mode 100644 index 0000000000000000000000000000000000000000..55cd7085740d0ce8de79491acbfc4888ebba21f8 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/quantization/_numeric_suite_fx.py @@ -0,0 +1,26 @@ +# flake8: noqa: F401 +r""" +This file is in the process of migration to `torch/ao/quantization`, and +is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +`torch/ao/ns/_numeric_suite_fx.py`, while adding an import statement +here. 
+""" + +from torch.ao.ns._numeric_suite_fx import ( + _add_loggers_impl, + _add_loggers_one_model, + _add_shadow_loggers_impl, + _extract_logger_info_one_model, + _extract_weights_impl, + _extract_weights_one_model, + add_loggers, + add_shadow_loggers, + extend_logger_results_with_comparison, + extract_logger_info, + extract_shadow_logger_info, + extract_weights, + NSTracer, + OutputLogger, + RNNReturnType, +) diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/quantization/_quantized_conversions.py b/infer_4_47_1/lib/python3.10/site-packages/torch/quantization/_quantized_conversions.py new file mode 100644 index 0000000000000000000000000000000000000000..8d930c366c0dd9857e463005474a2d59c04c4ae6 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/quantization/_quantized_conversions.py @@ -0,0 +1,133 @@ +# mypy: allow-untyped-defs +import torch + + +# Pack pairs of int4 values into int8, in row major order; first int4 +# value goes into lower order bits, and second int4 value into higher +# order bits of resulting int8 value. +def pack_int4_to_int8(weight): + assert weight.dim() == 2 + assert weight.shape[1] % 2 == 0 + assert weight.dtype == torch.int8 + return ((weight[:, 1::2] & 0xF) << 4) | (weight[:, 0::2] & 0xF) + + +# Unpack quandruples of bits in int8 values into int4 values, in row +# major order; lower 4 bits go into first int4 value goes, and upper 4 +# bits go into second int4 value. +def unpack_int8_to_int4(weight): + assert weight.dim() == 2 + assert weight.dtype == torch.int8 + return torch.stack((weight & 0xF, (weight >> 4) & 0xF), dim=2).view( + weight.shape[0], 2 * weight.shape[1] + ) + + +# Transpose the weight matrix, and then reorder its elements according +# to underlying requirements of CUTLASS library, so that it could be +# used for CUTLASS-based mixed datatypes linear operation. 
+def quantized_weight_reorder_for_mixed_dtypes_linear_cutlass( + weight, dtypeq, transpose=False +): + assert weight.dim() == 2 + assert weight.dtype == torch.int8 + assert dtypeq == torch.int8 or dtypeq == torch.quint4x2 + assert weight.device.type == "cuda" + + device = weight.device + + # subbyte_transpose + if not transpose: + if dtypeq == torch.int8: + outp = weight.T + elif dtypeq == torch.quint4x2: + outp = pack_int4_to_int8(unpack_int8_to_int4(weight.view(torch.int8)).T) + else: + outp = weight + + ncols, nrows = outp.shape # type: ignore[possibly-undefined] + assert nrows % (32 if dtypeq == torch.quint4x2 else 64) == 0 + assert ncols % 64 == 0 + + # permute_B_rows_for_mixed_gemm + # (permute cols actually, as transpose is applied first here) + if dtypeq == torch.quint4x2: + cols_permuted = ( + torch.tensor( + [0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15], + device=device, + ) + + (torch.arange(0, nrows // 16, device=device).reshape(-1, 1) * 16).expand( + nrows // 16, 16 + ) + ).view(-1) + else: + cols_permuted = ( + torch.tensor( + [0, 1, 4, 5, 8, 9, 12, 13, 2, 3, 6, 7, 10, 11, 14, 15], + device=device, + ) + + (torch.arange(0, nrows // 16, device=device).reshape(-1, 1) * 16).expand( + nrows // 16, 16 + ) + ).view(-1) + outp = outp.index_copy(1, cols_permuted, outp) + + # interleave_column_major_tensor + magic0 = 4 if dtypeq == torch.quint4x2 else 2 + magic1 = 32 // magic0 + + tmp0 = ( + (torch.arange(0, ncols // magic0, device=device) * (nrows // 4 * magic0)) + .view(-1, 1) + .repeat(1, nrows // 4 * magic0) + .view(-1) + ) + tmp1 = ( + (torch.arange(0, nrows // 4 // magic1, device=device) * (magic0 * magic1)) + .view(-1, 1) + .repeat(1, magic1) + .view(-1) + .repeat(ncols) + ) + tmp2 = ( + (torch.arange(0, magic0, device=device) * magic1) + .view(-1, 1) + .repeat(1, nrows // 4) + .view(-1) + .repeat(ncols // magic0) + ) + tmp3 = torch.arange(0, magic1, device=device).repeat(nrows // 4 * ncols // magic1) + + outp_offsets = tmp0 + tmp1 + tmp2 + 
tmp3 + + tmp = outp.view(-1).view(torch.int32) + outp = torch.zeros_like(tmp) + outp.scatter_(0, outp_offsets, tmp) + outp = outp.view(weight.dtype) + + # add_bias_and_interleave_quantized_tensor_inplace + tmp = outp.view(-1) + + outp = torch.empty_like(tmp) + if dtypeq == torch.int8: + tmp = (tmp.to(torch.int) + 128).to(tmp.dtype) + outp[0::4] = tmp[0::4] + outp[1::4] = tmp[2::4] + outp[2::4] = tmp[1::4] + outp[3::4] = tmp[3::4] + elif dtypeq == torch.quint4x2: + tmp0 = ((tmp & 0xF) + 8) & 0xF + tmp0 = (tmp0[1::2] << 4) | tmp0[0::2] + tmp1 = (((tmp >> 4) & 0xF) + 8) & 0xF + tmp1 = (tmp1[1::2] << 4) | tmp1[0::2] + outp[0::4] = tmp0[0::2] + outp[1::4] = tmp0[1::2] + outp[2::4] = tmp1[0::2] + outp[3::4] = tmp1[1::2] + + if dtypeq == torch.quint4x2: + nrows *= 2 + ncols //= 2 + + return outp.view(nrows, ncols).view(torch.uint8) diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/quantization/fuser_method_mappings.py b/infer_4_47_1/lib/python3.10/site-packages/torch/quantization/fuser_method_mappings.py new file mode 100644 index 0000000000000000000000000000000000000000..cfb13ac96271fa7b926cc703918984760e6ede15 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/quantization/fuser_method_mappings.py @@ -0,0 +1,15 @@ +# flake8: noqa: F401 +r""" +This file is in the process of migration to `torch/ao/quantization`, and +is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +`torch/ao/quantization/fuser_method_mappings.py`, while adding an import statement +here. 
+""" +from torch.ao.quantization.fuser_method_mappings import ( + _DEFAULT_OP_LIST_TO_FUSER_METHOD, + fuse_conv_bn, + fuse_conv_bn_relu, + fuse_linear_bn, + get_fuser_method, +) diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/quantization/quantization_mappings.py b/infer_4_47_1/lib/python3.10/site-packages/torch/quantization/quantization_mappings.py new file mode 100644 index 0000000000000000000000000000000000000000..8b44a980ce82fbfa5a81ad906499806cf99b876f --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/quantization/quantization_mappings.py @@ -0,0 +1,29 @@ +# flake8: noqa: F401 +r""" +This file is in the process of migration to `torch/ao/quantization`, and +is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +`torch/ao/quantization/quantization_mappings.py`, while adding an import statement +here. +""" +from torch.ao.quantization.quantization_mappings import ( + _get_special_act_post_process, + _has_special_act_post_process, + _INCLUDE_QCONFIG_PROPAGATE_LIST, + DEFAULT_DYNAMIC_QUANT_MODULE_MAPPINGS, + DEFAULT_FLOAT_TO_QUANTIZED_OPERATOR_MAPPINGS, + DEFAULT_MODULE_TO_ACT_POST_PROCESS, + DEFAULT_QAT_MODULE_MAPPINGS, + DEFAULT_REFERENCE_STATIC_QUANT_MODULE_MAPPINGS, + DEFAULT_STATIC_QUANT_MODULE_MAPPINGS, + get_default_compare_output_module_list, + get_default_dynamic_quant_module_mappings, + get_default_float_to_quantized_operator_mappings, + get_default_qat_module_mappings, + get_default_qconfig_propagation_list, + get_default_static_quant_module_mappings, + get_dynamic_quant_module_class, + get_quantized_operator, + get_static_quant_module_class, + no_observer_set, +) diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/quantization/utils.py b/infer_4_47_1/lib/python3.10/site-packages/torch/quantization/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..7d51d58f38d7462713f84ab62427852c1dd8e52c --- /dev/null +++ 
b/infer_4_47_1/lib/python3.10/site-packages/torch/quantization/utils.py @@ -0,0 +1,29 @@ +# flake8: noqa: F401 +r""" +Utils shared by different modes of quantization (eager/graph) + +This file is in the process of migration to `torch/ao/quantization`, and +is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +`torch/ao/quantization/utils.py`, while adding an import statement +here. +""" + +from torch.ao.quantization.utils import ( + activation_dtype, + activation_is_int8_quantized, + activation_is_statically_quantized, + calculate_qmin_qmax, + check_min_max_valid, + get_combined_dict, + get_qconfig_dtypes, + get_qparam_dict, + get_quant_type, + get_swapped_custom_module_class, + getattr_from_fqn, + is_per_channel, + is_per_tensor, + weight_dtype, + weight_is_quantized, + weight_is_statically_quantized, +) diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/__init__.py b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..de042277c7c8fd67084cdf70026c1779755a164e --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/__init__.py @@ -0,0 +1,5 @@ +from torch._C import FileCheck as FileCheck + +from . 
import _utils +from ._comparison import assert_allclose, assert_close as assert_close +from ._creation import make_tensor as make_tensor diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/__init__.py b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/autocast_test_lists.py b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/autocast_test_lists.py new file mode 100644 index 0000000000000000000000000000000000000000..c9789f19c3dcff455e1890543bba4f1665903346 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/autocast_test_lists.py @@ -0,0 +1,474 @@ +# mypy: ignore-errors + +import collections + +import torch +from torch.testing._internal.common_utils import TEST_WITH_ROCM +from torch.testing._internal.common_utils import TestCase + + +class AutocastTestLists: + def _rnn_cell_args(self, n, num_chunks, is_lstm, dev, dtype): + input = (torch.randn((n, n), device=dev, dtype=torch.float32),) + + hx = ((torch.randn((n, n), device=dev, dtype=torch.float32), + torch.randn((n, n), device=dev, dtype=torch.float32)) if is_lstm else + torch.randn((n, n), device=dev, dtype=torch.float32),) + + weights = (torch.randn((num_chunks * n, n), device=dev, dtype=torch.float32), # weight_ih + torch.randn((num_chunks * n, n), device=dev, dtype=torch.float32), # weight_hh + torch.randn((num_chunks * n), device=dev, dtype=torch.float32), # bias_ih + torch.randn((num_chunks * n), device=dev, dtype=torch.float32)) # bias_hh + + # returns args as a tuple + return input + hx + weights + + # Supplies ops and arguments for test_autocast_* in test/test_cuda.py + def __init__(self, dev): + super().__init__() + n = 8 + # Utility arguments, created as one-element tuples + pointwise0_fp16 = (torch.randn(n, 
dtype=torch.float16, device=dev),) + pointwise1_fp16 = (torch.randn(n, dtype=torch.float16, device=dev),) + pointwise2_fp16 = (torch.randn(n, dtype=torch.float16, device=dev),) + mat0_fp16 = (torch.randn((n, n), dtype=torch.float16, device=dev),) + mat1_fp16 = (torch.randn((n, n), dtype=torch.float16, device=dev),) + mat2_fp16 = (torch.randn((n, n), dtype=torch.float16, device=dev),) + + dimsets = ((n, n, n), (n, n, n, n), (n, n, n, n, n)) + conv_args_fp32 = [(torch.randn(dimset, dtype=torch.float32, device=dev), + torch.randn(dimset, dtype=torch.float32, device=dev)) + for dimset in dimsets] + bias_fp32 = (torch.randn((n,), dtype=torch.float32, device=dev),) + element0_fp32 = (torch.randn(1, dtype=torch.float32, device=dev),) + pointwise0_fp32 = (torch.randn(n, dtype=torch.float32, device=dev),) + pointwise1_fp32 = (torch.randn(n, dtype=torch.float32, device=dev),) + mat0_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),) + mat1_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),) + mat2_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),) + mat3_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),) + + # The lists below organize ops that autocast needs to test. + # self.list_name corresponds to test_autocast_list_name in test/test_cuda.py. + # Each op is associated with a tuple of valid arguments. + # In addition, cudnn conv ops are not supported on ROCm and hence will + # be skipped by passing TEST_WITH_ROCM flag to those ops in self.torch_fp16 list. + + # Some ops implement built-in type promotion. These don't need autocasting, + # but autocasting relies on their promotion, so we include tests to double-check. 
+ self.torch_expect_builtin_promote = [ + ("eq", pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("ge", pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("gt", pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("le", pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("lt", pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("ne", pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("add", pointwise0_fp32 + pointwise1_fp16, torch.float32), + ("div", pointwise0_fp32 + pointwise1_fp16, torch.float32), + ("mul", pointwise0_fp32 + pointwise1_fp16, torch.float32), + ("cat", (pointwise0_fp16 + pointwise1_fp32,), torch.float32), + ("equal", pointwise0_fp32 + pointwise1_fp16, torch.float32), + ("stack", (pointwise0_fp16 + pointwise1_fp32,), torch.float32), + ] + self.methods_expect_builtin_promote = [ + ("__eq__", pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("__ge__", pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("__gt__", pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("__le__", pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("__lt__", pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("__ne__", pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("__add__", pointwise0_fp32 + pointwise1_fp16, torch.float32), + ("__div__", pointwise0_fp32 + pointwise1_fp16, torch.float32), + ("__mul__", pointwise0_fp32 + pointwise1_fp16, torch.float32), + ] + + # The remaining lists organize ops that autocast treats explicitly. 
+ self.torch_fp16 = [ + # deprecated _convolution + ("_convolution", conv_args_fp32[1] + bias_fp32 + ((1, 1), (0, 0), (1, 1), False, + (0, 0), 1, False, True, True)), + # the current _convolution + ("_convolution", conv_args_fp32[1] + bias_fp32 + ((1, 1), (0, 0), (1, 1), False, + (0, 0), 1, False, True, True, True)), + ("conv1d", conv_args_fp32[0]), + ("conv2d", conv_args_fp32[1]), + ("conv3d", conv_args_fp32[2]), + ("conv_tbc", conv_args_fp32[0] + bias_fp32), + ("conv_transpose1d", conv_args_fp32[0]), + ("conv_transpose2d", conv_args_fp32[1]), + ("conv_transpose3d", conv_args_fp32[2]), + ("convolution", conv_args_fp32[1] + bias_fp32 + ((1, 1), (0, 0), (1, 1), False, (0, 0), 1)), + ("cudnn_convolution", conv_args_fp32[1] + ((0, 0), (1, 1), (1, 1), 1, False, True, True), TEST_WITH_ROCM), + ("cudnn_convolution_transpose", conv_args_fp32[1] + ((0, 0), (0, 0), (1, 1), + (1, 1), 1, False, True, True), TEST_WITH_ROCM), + ("prelu", pointwise0_fp32 + element0_fp32), + ("addmm", mat1_fp32 + mat2_fp32 + mat3_fp32), + ("addmv", pointwise0_fp32 + mat2_fp32 + pointwise1_fp32), + ("addr", mat0_fp32 + pointwise0_fp32 + pointwise1_fp32), + ("matmul", mat0_fp32 + mat1_fp32), + ("einsum", "bkhd,bqhd->bqkh", mat0_fp32 + mat1_fp32), + ("mm", mat0_fp32 + mat1_fp32), + ("mv", mat0_fp32 + pointwise0_fp32), + ("chain_matmul", mat0_fp32 + mat1_fp32 + mat2_fp32), + ("addbmm", mat0_fp32 + (torch.randn((n, n, n), device=dev, dtype=torch.float32), + torch.randn((n, n, n), device=dev, dtype=torch.float32))), + ("baddbmm", (torch.randn((n, n, n), device=dev, dtype=torch.float32), + torch.randn((n, n, n), device=dev, dtype=torch.float32), + torch.randn((n, n, n), device=dev, dtype=torch.float32))), + ("bmm", (torch.randn((n, n, n), device=dev, dtype=torch.float32), + torch.randn((n, n, n), device=dev, dtype=torch.float32))), + # _thnn_fused_lstm_cell and _thnn_fused_gru_cell are not Python-exposed as far as I can tell. 
+ # ("_thnn_fused_lstm_cell", mat0_fp32 + mat1_fp32 + mat2_fp32 + pointwise0_fp32 + pointwise1_fp32), + # ("_thnn_fused_gru_cell", mat0_fp32 + mat1_fp32 + mat2_fp32 + pointwise0_fp32 + pointwise1_fp32), + ("lstm_cell", self._rnn_cell_args(n, num_chunks=4, is_lstm=True, dev=dev, dtype=torch.float32)), + ("gru_cell", self._rnn_cell_args(n, num_chunks=3, is_lstm=False, dev=dev, dtype=torch.float32)), + ("rnn_tanh_cell", self._rnn_cell_args(n, num_chunks=1, is_lstm=False, dev=dev, dtype=torch.float32)), + ("rnn_relu_cell", self._rnn_cell_args(n, num_chunks=1, is_lstm=False, dev=dev, dtype=torch.float32)), + ] + self.torch_fp32 = [ + ("acos", (pointwise0_fp16[0].clamp(-.9, 0.9),)), + ("asin", (pointwise0_fp16[0].clamp(-.9, 0.9),)), + ("cosh", pointwise0_fp16), + ("erfinv", (pointwise0_fp16[0].clamp(-.9, .9),)), + ("exp", pointwise0_fp16), + ("expm1", pointwise0_fp16), + ("log", (pointwise0_fp16[0].clamp(0.1, 100.0),)), + ("log10", (pointwise0_fp16[0].clamp(0.1, 100.0),)), + ("log2", (pointwise0_fp16[0].clamp(0.1, 100.0),)), + ("log1p", (pointwise0_fp16[0].clamp(-0.9, 100.0),)), + ("reciprocal", pointwise0_fp16), + ("rsqrt", (pointwise0_fp16[0].clamp(0.0, 100.0),)), + ("sinh", pointwise0_fp16), + ("tan", (pointwise0_fp16[0].clamp(-3.1 / 2, 3.1 / 2),)), + ("pow", ((pointwise0_fp16[0] + 1.).clamp(0.0, 100.0),) + pointwise1_fp16), + ("pow", ((pointwise0_fp16[0] + 1.).clamp(0.0, 100.0),) + (1.7,)), + # ("pow", (1.7,) + pointwise0_fp16), # This variant has a backend, but is not documented in the API. 
+ ("softmax", pointwise0_fp16 + (0,)), + ("log_softmax", pointwise0_fp16 + (0,)), + ("layer_norm", pointwise0_fp16 + ((pointwise0_fp16[0].numel(),),)), + ("group_norm", mat0_fp16 + (1,)), + ("norm", pointwise0_fp16), + ("norm", pointwise0_fp16, {"dim": 0}), + # these need magma + # ("norm", mat0_fp16, {"p": "nuc"}), + # ("norm", mat0_fp16, {"p": "nuc", "dim": 0}), + ("norm", pointwise0_fp16, {"p": 1}), + ("norm", pointwise0_fp16, {"p": 1, "dim": 0}), + ("cosine_similarity", mat0_fp16 + mat1_fp16), + ("poisson_nll_loss", mat0_fp16 + mat1_fp16 + (True, False, 1.e-8, torch.nn._reduction.get_enum('mean'))), + ("cosine_embedding_loss", (torch.tensor([[1, 2, 3]], device=dev, dtype=torch.float16), + torch.tensor([[1, 3, 4]], device=dev, dtype=torch.float16), + torch.tensor([1], device=dev, dtype=torch.int))), + ("hinge_embedding_loss", mat0_fp16 + (torch.ones(n, device=dev, dtype=torch.int),)), + ("kl_div", mat0_fp16 + (torch.rand((n, n), device=dev, dtype=torch.float16),)), + ("margin_ranking_loss", mat0_fp16 + mat1_fp16 + (torch.ones((n,), device=dev, dtype=torch.float16),)), + ("triplet_margin_loss", mat0_fp16 + mat1_fp16 + mat2_fp16), + ("binary_cross_entropy_with_logits", mat0_fp16 + (torch.rand((n, n), device=dev, dtype=torch.float16),)), + ("cumprod", pointwise0_fp16 + (0,)), + ("cumsum", pointwise0_fp16 + (0,)), + ("dist", pointwise0_fp16 + pointwise1_fp16), + ("pdist", mat0_fp16), + ("cdist", mat0_fp16 + mat1_fp16), + ("prod", pointwise0_fp16), + ("prod", pointwise0_fp16 + (0,)), + ("renorm", mat0_fp16 + (2, 0, 1.0)), + ("sum", pointwise0_fp16), + ("sum", mat0_fp16 + (1,)), + ("logsumexp", mat0_fp16 + (1,)), + ] + self.torch_need_autocast_promote = [ + ("addcdiv", pointwise0_fp32 + pointwise1_fp16 + (pointwise2_fp16[0].clamp(0.1, 100),)), + ("addcmul", pointwise0_fp32 + pointwise1_fp16 + pointwise2_fp16), + ("atan2", pointwise0_fp32 + (pointwise1_fp16[0].clamp(0.1, 100),)), + ("bilinear", (torch.randn((1, 2), dtype=torch.float16, device=dev), + torch.randn((1, 
2), dtype=torch.float32, device=dev), + torch.randn((1, 2, 2), dtype=torch.float16, device=dev), + torch.randn((1,), dtype=torch.float32, device=dev))), + ("cross", (torch.randn(3, dtype=torch.float32, device=dev), + torch.randn(3, dtype=torch.float16, device=dev))), + ("dot", pointwise0_fp16 + pointwise1_fp32), + ("vdot", pointwise0_fp16 + pointwise1_fp32), + ("grid_sampler", (torch.randn((2, 3, 33, 22), dtype=torch.float16, device=dev), + torch.randn((2, 22, 11, 2), dtype=torch.float32, device=dev), + 0, 0, False)), + ("index_put", pointwise0_fp32 + ((torch.tensor([1], device=dev, dtype=torch.long),), + torch.randn(1, device=dev, dtype=torch.float16))), + ("index_put", pointwise0_fp16 + ((torch.tensor([1], device=dev, dtype=torch.long),), + torch.randn(1, device=dev, dtype=torch.float32))), + ("tensordot", (torch.randn((2, 2, 2), dtype=torch.float32, device=dev), + torch.randn((2, 2, 2), dtype=torch.float16, device=dev))), + ("scatter_add", (torch.zeros(2, 2, 2, dtype=torch.float32, device=dev), + 0, + torch.randint(0, 2, (2, 2, 2), device=dev), + torch.randn((2, 2, 2), dtype=torch.float16, device=dev))), + ("scatter_add", (torch.zeros(2, 2, 2, dtype=torch.float16, device=dev), + 0, + torch.randint(0, 2, (2, 2, 2), device=dev), + torch.randn((2, 2, 2), dtype=torch.float32, device=dev))), + ] + self.nn_fp16 = [ + ("linear", mat0_fp32 + mat1_fp32 + mat2_fp32), + ] + self.nn_fp32 = [ + ("softplus", pointwise0_fp16), + ("nll_loss", (torch.rand((n, n), device=dev, dtype=torch.float), + torch.zeros((n,), device=dev, dtype=torch.long))), + ("nll_loss2d", (torch.rand((n, n, n, n), device=dev, dtype=torch.half), + torch.zeros((n, n, n), device=dev, dtype=torch.long))), + ("l1_loss", mat0_fp16 + mat1_fp16), + ("smooth_l1_loss", mat0_fp16 + mat1_fp16), + ("mse_loss", mat0_fp16 + mat1_fp16), + ("multilabel_margin_loss", mat0_fp16 + (torch.ones((n, n), device=dev, dtype=torch.long),)), + ("soft_margin_loss", mat0_fp16 + (torch.ones((n, n), device=dev, dtype=torch.long),)), + 
("multi_margin_loss", mat0_fp16 + (torch.ones((n,), device=dev, dtype=torch.long),)), + ] + self.linalg_fp16 = [ + ("linalg_vecdot", mat0_fp32 + mat0_fp32), + ("linalg_multi_dot", (mat0_fp32 + mat1_fp32 + mat2_fp32,)), + ] + self.methods_fp16 = [ + ("__matmul__", mat0_fp32 + mat1_fp32) + ] + self.methods_fp32 = [ + ("__pow__", (torch.rand(n, device=dev, dtype=torch.float16), 1.5)), + ] + self.banned = [ + ("binary_cross_entropy", (torch.rand((n, n), device=dev, dtype=torch.float32), + torch.rand((n, n), device=dev, dtype=torch.float32)), torch._C._nn), + ] + + +class AutocastCPUTestLists: + # Supplies ops and arguments for test_autocast_* in test/test_cpu.py + def __init__(self, dev): + super().__init__() + n = 8 + # Utility arguments, created as one-element tuples + pointwise0_bf16 = (torch.randn(n, dtype=torch.bfloat16, device=dev),) + pointwise1_bf16 = (torch.randn(n, dtype=torch.bfloat16, device=dev),) + pointwise2_bf16 = (torch.randn(n, dtype=torch.bfloat16, device=dev),) + mat0_bf16 = (torch.randn((n, n), dtype=torch.bfloat16, device=dev),) + mat1_bf16 = (torch.randn((n, n), dtype=torch.bfloat16, device=dev),) + mat2_bf16 = (torch.randn((n, n), dtype=torch.bfloat16, device=dev),) + + pointwise0_fp16 = (torch.randn(n, dtype=torch.float16, device=dev),) + pointwise1_fp16 = (torch.randn(n, dtype=torch.float16, device=dev),) + + dummy_dimsets = ((n,), (n, n), (n, n, n), (n, n, n, n), (n, n, n, n, n)) + + dummy_bf16 = [(torch.randn(dimset, dtype=torch.bfloat16, device=dev),) + for dimset in dummy_dimsets] + + dimsets = ((n, n, n), (n, n, n, n), (n, n, n, n, n)) + conv_args_bf16 = [(torch.randn(dimset, dtype=torch.bfloat16, device=dev), + torch.randn(dimset, dtype=torch.bfloat16, device=dev)) + for dimset in dimsets] + conv_args_fp32 = [(torch.randn(dimset, dtype=torch.float32, device=dev), + torch.randn(dimset, dtype=torch.float32, device=dev)) + for dimset in dimsets] + + bias_fp32 = (torch.randn((n,), dtype=torch.float32, device=dev),) + element0_fp32 = 
(torch.randn(1, dtype=torch.float32, device=dev),) + pointwise0_fp32 = (torch.randn(n, dtype=torch.float32, device=dev),) + pointwise1_fp32 = (torch.randn(n, dtype=torch.float32, device=dev),) + mat0_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),) + mat1_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),) + mat2_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),) + mat3_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),) + + dummy_fp32 = [(torch.randn(dimset, dtype=torch.float32, device=dev),) + for dimset in dummy_dimsets] + # The lists below organize ops that autocast needs to test. + # self.list_name corresponds to test_autocast_list_name in test/test_cpu.py. + # Each op is associated with a tuple of valid arguments. + + # Some ops implement built-in type promotion. These don't need autocasting, + # but autocasting relies on their promotion, so we include tests to double-check. + self.torch_expect_builtin_promote = [ + ("eq", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("ge", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("gt", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("le", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("lt", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("ne", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("add", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.float32), + ("div", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.float32), + ("mul", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.float32), + ] + + self.methods_expect_builtin_promote = [ + ("__eq__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("__ge__", pointwise0_fp32 + 
pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("__gt__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("__le__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("__lt__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("__ne__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("__add__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.float32), + ("__div__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.float32), + ("__mul__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.float32), + ] + # The remaining lists organize ops that autocast treats explicitly. + self.torch_16 = [ + ("conv1d", conv_args_fp32[0]), + ("conv2d", conv_args_fp32[1]), + ("conv3d", conv_args_fp32[2]), + ("bmm", (torch.randn((n, n, n), device=dev, dtype=torch.float32), + torch.randn((n, n, n), device=dev, dtype=torch.float32))), + ("mm", mat0_fp32 + mat1_fp32), + ("matmul", mat0_fp32 + mat1_fp32), + ("baddbmm", (torch.randn((n, n, n), device=dev, dtype=torch.float32), + torch.randn((n, n, n), device=dev, dtype=torch.float32), + torch.randn((n, n, n), device=dev, dtype=torch.float32))), + ("addmm", mat1_fp32 + mat2_fp32 + mat3_fp32), + ("addbmm", mat0_fp32 + (torch.randn((n, n, n), device=dev, dtype=torch.float32), + torch.randn((n, n, n), device=dev, dtype=torch.float32))), + ("conv_tbc", (torch.randn((10, 7, 3), device=dev, dtype=torch.float32), + torch.randn((5, 3, 5), device=dev, dtype=torch.float32), + torch.randn(5, device=dev, dtype=torch.float32), + 0)), + ("conv_transpose1d", conv_args_fp32[0]), + ("conv_transpose2d", conv_args_fp32[1]), + ("conv_transpose3d", conv_args_fp32[2]), + ("prelu", pointwise0_fp32 + element0_fp32), + ("_native_multi_head_attention", (torch.randn((n, n, n), device=dev, dtype=torch.float32), + 
torch.randn((n, n, n), device=dev, dtype=torch.float32), + torch.randn((n, n, n), device=dev, dtype=torch.float32), + n, 4, torch.randn((3 * n, n), device=dev, dtype=torch.float32), + torch.randn((3 * n), device=dev, dtype=torch.float32), + torch.randn((n, n), device=dev, dtype=torch.float32), + torch.randn((n), device=dev, dtype=torch.float32))), + ] + self.torch_fp32 = [ + ("poisson_nll_loss", mat0_bf16 + mat1_bf16 + (True, False, 1.e-8, torch.nn._reduction.get_enum('mean'))), + ("cosine_embedding_loss", (torch.tensor([[1, 2, 3]], device=dev, dtype=torch.bfloat16), + torch.tensor([[1, 3, 4]], device=dev, dtype=torch.bfloat16), + torch.tensor([1], device=dev, dtype=torch.int))), + ("hinge_embedding_loss", mat0_bf16 + (torch.ones(n, device=dev, dtype=torch.int),)), + ("margin_ranking_loss", mat0_bf16 + mat1_bf16 + (torch.ones((n,), device=dev, dtype=torch.bfloat16),)), + ("triplet_margin_loss", mat0_bf16 + mat1_bf16 + mat2_bf16), + ("binary_cross_entropy_with_logits", mat0_bf16 + (torch.rand((n, n), device=dev, dtype=torch.bfloat16),)), + ] + self.nn_16 = [ + ("linear", mat0_fp32 + mat1_fp32, {}), + ] + self.nn_fp32 = [ + ("avg_pool3d", dummy_bf16[3], {"kernel_size": (3, 3, 3), "stride": (1, 1, 1)}), + ("binary_cross_entropy", (torch.rand((n, n), device=dev, dtype=torch.bfloat16),) + + (torch.rand((n, n), device=dev, dtype=torch.bfloat16),)), + ("reflection_pad1d", dummy_bf16[2], {"padding": (3, 3)}), + ("nll_loss", (torch.rand((n, n), device=dev, dtype=torch.bfloat16), + torch.zeros((n,), device=dev, dtype=torch.long))), + ("nll_loss2d", (torch.rand((n, n, n, n), device=dev, dtype=torch.bfloat16), + torch.zeros((n, n, n), device=dev, dtype=torch.long))), + ("l1_loss", mat0_bf16 + mat1_bf16), + ("smooth_l1_loss", mat0_bf16 + mat1_bf16), + ("mse_loss", mat0_bf16 + mat1_bf16), + ("multilabel_margin_loss", mat0_bf16 + (torch.ones((n, n), device=dev, dtype=torch.long),)), + ("soft_margin_loss", mat0_bf16 + (torch.ones((n, n), device=dev, dtype=torch.long),)), + 
("multi_margin_loss", mat0_bf16 + (torch.ones((n,), device=dev, dtype=torch.long),)), + ("huber_loss", mat0_bf16 + mat1_bf16), + ] + self.torch_need_autocast_promote = [ + ("cat", (pointwise0_bf16 + pointwise1_fp32,), (pointwise0_fp16 + pointwise1_fp32,)), + ("stack", (pointwise0_bf16 + pointwise1_fp32,), (pointwise0_fp16 + pointwise1_fp32,)), + ] + + +class TestAutocast(TestCase): + def args_maybe_kwargs(self, op_with_args): + if len(op_with_args) == 2: + return op_with_args[0], op_with_args[1], {} + else: + return op_with_args[0], op_with_args[1], op_with_args[2] + + def _run_autocast_outofplace( + self, + op, + args, + run_as_type, + device, + out_type=None, + module=torch, + add_kwargs=None, + amp_dtype=torch.bfloat16, + ): + # helper to cast args + def cast(val, to_type): + if isinstance(val, torch.Tensor): + return val.to(to_type) if val.is_floating_point() else val + elif isinstance(val, collections.abc.Iterable): + return type(val)(cast(v, to_type) for v in val) + else: + return val + + if add_kwargs is None: + add_kwargs = {} + + self.assertFalse(torch.is_autocast_enabled(device_type=device)) + with torch.amp.autocast(device_type=device, dtype=amp_dtype): + self.assertTrue(torch.is_autocast_enabled(device_type=device)) + + out_type = out_type if out_type is not None else run_as_type + output = output_method = None + + # Try module.* variant, if requested: + if module is not None and hasattr(module, op): + output = getattr(module, op)(*args, **add_kwargs) + if isinstance(output, torch.Tensor): + self.assertTrue( + out_type == output.dtype, + f"autocast for torch.{op} produced {output.dtype}, should produce {out_type}", + ) + # Try Tensor.* variant: + if hasattr(torch.Tensor, op): + output_method = getattr(args[0], op)(*args[1:], **add_kwargs) + if isinstance(output_method, torch.Tensor): + self.assertTrue( + out_type == output_method.dtype, + f"autocast for torch.{op} produced {output_method.dtype}, should produce torch.{out_type}", + ) + + self.assertTrue( 
+ (output is not None) or (output_method is not None), + f"{op} not found as an attribute on either Tensor or the requested module {module}", + ) + + # Accounts for ops that return Tensors, iterables, and other non-Tensors. + # For example, lstm_cell returns a tuple and equal returns bool. + def compare(first, second): + if isinstance(first, torch.Tensor): + return torch.equal(first, second) + elif isinstance(first, collections.abc.Iterable): + return all(compare(f, s) for f, s in zip(first, second)) + else: + return first == second + + # If both torch.* and Tensor.* variants were found, check outputs are identical + if (output is not None) and (output_method is not None): + self.assertTrue(type(output) == type(output_method)) + comparison = compare(output, output_method) + self.assertTrue( + comparison, f"torch.{op} result did not match Tensor.{op} result" + ) + + # Compare numerics to Python-side "autocasting" that (we expect) does the same thing + # as the C++-side autocasting, and should be bitwise accurate. 
+ output_to_compare = output if output is not None else output_method + with torch.amp.autocast(device_type=device, enabled=False): + self.assertFalse( + torch.is_autocast_enabled(device_type=device) + ) + + if module is not None and hasattr(module, op): + control = getattr(module, op)( + *cast(args, run_as_type), **add_kwargs + ) + else: + control = getattr(args[0].to(run_as_type), op)( + *cast(args[1:], run_as_type), **add_kwargs + ) + self.assertTrue(type(output_to_compare) == type(control)) + comparison = compare(output_to_compare, control) + self.assertTrue(comparison, f"torch.{op} result did not match control") + self.assertTrue(torch.is_autocast_enabled(device_type=device)) + self.assertFalse(torch.is_autocast_enabled(device_type=device)) diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/autograd_function_db.py b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/autograd_function_db.py new file mode 100644 index 0000000000000000000000000000000000000000..e092c4d9339b7c804328fe39826710f89354e49d --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/autograd_function_db.py @@ -0,0 +1,635 @@ +# mypy: ignore-errors + +import torch +from functools import partial +from torch.testing import make_tensor +from torch.testing._internal.opinfo.core import ( + OpInfo, + SampleInput, +) +from torch.testing._internal.common_dtype import all_types_and +import numpy as np + +# Note: [autograd.Function db] +# +# This is a collection of autograd.Function test cases written as OpInfos +# so they can easily be consumed by OpInfo-based tests to check if a subsystem +# supports autograd.Function. 
+# +# Axes: +# - saves {output, input, intermediate, non-tensor} +# - {inputs, output} x {single tensor, tensors, arbitrary objects} +# - Uses {mark_dirty, mark_non_differentiable, once_differentiable} + + +def to_numpy(tensor): + return tensor.cpu().numpy() + + +class NumpyCube(torch.autograd.Function): + @staticmethod + def forward(input): + input_np = to_numpy(input) + dinput = torch.tensor(3 * input_np ** 2, device=input.device) + return torch.tensor(input_np ** 3, device=input.device), dinput + + @staticmethod + def setup_context(ctx, inputs, output): + ctx.save_for_backward(inputs[0], output[1]) + ctx.save_for_forward(inputs[0], output[1]) + + @staticmethod + def backward(ctx, grad_output, grad_saved): + input, dinput = ctx.saved_tensors + return NumpyMul.apply(grad_output, dinput) + 6 * NumpyMul.apply(grad_saved, input) + + @staticmethod + def vmap(info, in_dims, input): + result = NumpyCube.apply(input) + return result, (in_dims[0], in_dims[0]) + + @staticmethod + def jvp(ctx, input_tangent): + input, dinput = ctx.saved_tensors + return NumpyMul.apply(input_tangent, dinput), 6 * NumpyMul.apply(input_tangent, input) + + +class CubeGenVmap(torch.autograd.Function): + generate_vmap_rule = True + + @staticmethod + def forward(x): + return x ** 3, 3 * x ** 2 + + @staticmethod + def setup_context(ctx, inputs, outputs): + ctx.save_for_backward(inputs[0], outputs[1]) + ctx.save_for_forward(inputs[0], outputs[1]) + + @staticmethod + def backward(ctx, grad_output, grad_saved): + input, dinput = ctx.saved_tensors + result = grad_output * dinput + 6 * dinput + return result + + @staticmethod + def jvp(ctx, input_tangent): + input, dinput = ctx.saved_tensors + return MulGenVmap.apply(input_tangent, dinput), 6 * NumpyMul.apply(input_tangent, input) + + +def sample_inputs_numpy_cube(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + yield SampleInput(make_arg(1, low=0.8, high=2), 
args=()) + + +class NumpyCubeNotComposable(torch.autograd.Function): + @staticmethod + def forward(input): + input_np = to_numpy(input) + return torch.tensor(input_np ** 3, device=input.device), input_np + + @staticmethod + def setup_context(ctx, inputs, output): + _, input_np = output + ctx.input_np = input_np + ctx.device = inputs[0].device + + @staticmethod + @torch.autograd.function.once_differentiable + def backward(ctx, grad_output, grad_saved): + result_np = 3 * (ctx.input_np ** 2) + return torch.tensor(result_np, device=ctx.device) + + +class NumpyMul(torch.autograd.Function): + @staticmethod + def forward(x, y): + return torch.tensor(to_numpy(x) * to_numpy(y), device=x.device) + + @staticmethod + def setup_context(ctx, inputs, output): + ctx.save_for_backward(*inputs) + ctx.save_for_forward(*inputs) + + @staticmethod + def backward(ctx, grad_output): + x, y = ctx.saved_tensors + gx = None + if ctx.needs_input_grad[0]: + gx = NumpyMul.apply(grad_output, y) + gy = None + if ctx.needs_input_grad[1]: + gy = NumpyMul.apply(grad_output, x) + return gx, gy + + @staticmethod + def vmap(info, in_dims, x, y): + x_bdim, y_bdim = in_dims + x = x.movedim(x_bdim, -1) if x_bdim is not None else x.unsqueeze(-1) + y = y.movedim(y_bdim, -1) if y_bdim is not None else y.unsqueeze(-1) + result = NumpyMul.apply(x, y) + result = result.movedim(-1, 0) + return result, 0 + + @staticmethod + def jvp(ctx, x_tangent, y_tangent): + x, y = ctx.saved_tensors + return x_tangent * y + y_tangent * x + +def sample_inputs_numpy_mul(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + # Broadcasting + yield SampleInput(make_arg(4, low=0.9, high=2), args=(make_arg(3, 4, low=0.9, high=2),)) + +def sample_inputs_numpy_mul_scalar(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + yield SampleInput(make_arg(4, low=0.9, 
high=2), args=(), kwargs={"scalar": 3.14}) + +class MulGenVmap(torch.autograd.Function): + generate_vmap_rule = True + + @staticmethod + def forward(x, y): + return x * y + + @staticmethod + def setup_context(ctx, inputs, outputs): + ctx.save_for_backward(*inputs) + ctx.save_for_forward(*inputs) + + @staticmethod + def backward(ctx, grad_output): + x, y = ctx.saved_tensors + gx = None + if ctx.needs_input_grad[0]: + gx = MulGenVmap.apply(grad_output, y) + gy = None + if ctx.needs_input_grad[1]: + gy = MulGenVmap.apply(grad_output, x) + return gx, gy + + @staticmethod + def jvp(ctx, x_tangent, y_tangent): + x, y = ctx.saved_tensors + return x_tangent * y + y_tangent * x + + +class NumpyExp_(torch.autograd.Function): + @staticmethod + def forward(x): + x_np = to_numpy(x) + np.exp(x_np, x_np) + return x + + @staticmethod + def setup_context(ctx, inputs, output): + x, = inputs + ctx.mark_dirty(x) + ctx.save_for_backward(output) + ctx.save_for_forward(output) + + @staticmethod + def backward(ctx, grad_output): + output, = ctx.saved_tensors + return NumpyMul.apply(grad_output, output) + + @staticmethod + def vmap(info, in_dims, x): + NumpyExp_.apply(x) + return x, in_dims[0] + + @staticmethod + def jvp(ctx, x_tangent): + # Doesn't call numpy operations because I didn't want to write NumpyMul_ + output, = ctx.saved_tensors + x_tangent.mul_(output) + return x_tangent + +class NumpySort(torch.autograd.Function): + @staticmethod + def forward(x, dim): + device = x.device + x = to_numpy(x) + ind = np.argsort(x, axis=dim) + ind_inv = np.argsort(ind, axis=dim) + result = np.take_along_axis(x, ind, axis=dim) + return ( + torch.tensor(x, device=device), + torch.tensor(ind, device=device), + torch.tensor(ind_inv, device=device), + ) + + @staticmethod + def setup_context(ctx, inputs, output): + x, dim = inputs + _, ind, ind_inv = output + ctx.mark_non_differentiable(ind, ind_inv) + ctx.save_for_backward(ind, ind_inv) + ctx.save_for_forward(ind, ind_inv) + ctx.dim = dim + + 
@staticmethod + def backward(ctx, grad_output, _0, _1): + ind, ind_inv = ctx.saved_tensors + return NumpyTake.apply(grad_output, ind_inv, ind, ctx.dim), None + + @staticmethod + def vmap(info, in_dims, x, dim): + x_bdim, _ = in_dims + x = x.movedim(x_bdim, 0) + # wrap dim + dim = dim if dim >= 0 else dim + x.dim() - 1 + return NumpySort.apply(x, dim + 1), (0, 0, 0) + + @staticmethod + def jvp(ctx, x_tangent, _): + ind, ind_inv = ctx.saved_tensors + return NumpyTake.apply(x_tangent, ind, ind_inv, ctx.dim), None, None + +class SortGenVmap(torch.autograd.Function): + generate_vmap_rule = True + + @staticmethod + def forward(x, dim): + device = x.device + ind = torch.argsort(x, dim=dim) + ind_inv = torch.argsort(ind, axis=dim) + result = torch.take_along_dim(x, ind, dim=dim) + return result, ind, ind_inv + + @staticmethod + def setup_context(ctx, inputs, outputs): + x, dim = inputs + _, ind, ind_inv = outputs + ctx.mark_non_differentiable(ind, ind_inv) + ctx.save_for_backward(ind, ind_inv) + ctx.save_for_forward(ind, ind_inv) + ctx.dim = dim + + @staticmethod + def backward(ctx, grad_output, _0, _1): + ind, ind_inv = ctx.saved_tensors + return TakeGenVmap.apply(grad_output, ind_inv, ind, ctx.dim), None + + @staticmethod + def jvp(ctx, x_tangent, _): + ind, ind_inv = ctx.saved_tensors + return TakeGenVmap.apply(x_tangent, ind, ind_inv, ctx.dim), None, None + + +def sample_inputs_numpy_sort(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + yield SampleInput(make_arg(3, 5), args=(1,)) + + +def sample_inputs_numpy_take(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + tensor = make_arg(3, 5) + dim = 1 + _, ind, ind_inv = NumpySort.apply(tensor, 1) + yield SampleInput(tensor, args=(ind, ind_inv, dim)) + + +class NumpyTake(torch.autograd.Function): + @staticmethod + def forward(x, ind, ind_inv, 
dim): + device = x.device + x = to_numpy(x) + ind = to_numpy(ind) + return torch.tensor(np.take_along_axis(x, ind, dim), device=device) + + @staticmethod + def setup_context(ctx, inputs, output): + x, ind, ind_inv, dim = inputs + ctx.save_for_backward(ind, ind_inv) + ctx.save_for_forward(ind, ind_inv) + ctx.dim = dim + + @staticmethod + def backward(ctx, grad_output): + ind, ind_inv = ctx.saved_tensors + result = NumpyTake.apply(grad_output, ind_inv, ind, ctx.dim) + return result, None, None, None + + @staticmethod + def vmap(info, in_dims, x, ind, ind_inv, dim): + x_bdim, ind_bdim, ind_inv_bdim, _ = in_dims + + # wrap dim + logical_dim = x.dim() if x_bdim is None else x_bdim - 1 + dim = dim if dim >= 0 else dim + logical_dim + + def expand_bdim(x, x_bdim): + if x_bdim is None: + return x.expand(info.batch_size, *x.shape) + return x.movedim(x_bdim, 0) + + x = expand_bdim(x, x_bdim) + ind = expand_bdim(ind, ind_bdim) + ind_inv = expand_bdim(ind_inv, ind_inv_bdim) + + return NumpyTake.apply(x, ind, ind_inv, dim + 1), 0 + + @staticmethod + def jvp(ctx, x_tangent, ind_tangent, ind_inv_tangent, _): + assert ind_tangent is None + assert ind_inv_tangent is None + ind, ind_inv = ctx.saved_tensors + return NumpyTake.apply(x_tangent, ind, ind_inv, ctx.dim) + +class TakeGenVmap(torch.autograd.Function): + generate_vmap_rule = True + + @staticmethod + def forward(x, ind, ind_inv, dim): + return torch.take_along_dim(x, ind, dim) + + @staticmethod + def setup_context(ctx, inputs, outputs): + x, ind, ind_inv, dim = inputs + ctx.save_for_backward(ind, ind_inv) + ctx.save_for_forward(ind, ind_inv) + ctx.dim = dim + + @staticmethod + def backward(ctx, grad_output): + ind, ind_inv = ctx.saved_tensors + result = TakeGenVmap.apply(grad_output, ind_inv, ind, ctx.dim) + return result, None, None, None + + @staticmethod + def jvp(ctx, x_tangent, ind_tangent, ind_inv_tangent, _): + ind, ind_inv = ctx.saved_tensors + return TakeGenVmap.apply(x_tangent, ind, ind_inv, ctx.dim) + +class 
Select(torch.autograd.Function): + @staticmethod + def forward(x, idx): + return x[idx] + + @staticmethod + def setup_context(ctx, inputs, output): + x, idx = inputs + ctx.x_shape = x.shape + ctx.idx = idx + + @staticmethod + def backward(ctx, grad_output): + result = grad_output.new_zeros(ctx.x_shape) + result[ctx.idx] = grad_output + return result, None + + @staticmethod + def vmap(info, in_dims, x, idx): + x_bdim, _ = in_dims + x = x.movedim(x_bdim, 1) + return Select.apply(x, idx), 0 + + @staticmethod + def jvp(ctx, x_tangent, _): + return Select.apply(x_tangent, ctx.idx) + +class SelectGenVmap(torch.autograd.Function): + generate_vmap_rule = True + + @staticmethod + def forward(x, idx): + return x[idx] + + @staticmethod + def setup_context(ctx, inputs, outputs): + x, idx = inputs + ctx.x_shape = x.shape + ctx.idx = idx + + @staticmethod + def backward(ctx, grad_output): + result = grad_output.new_zeros(ctx.x_shape) + result[ctx.idx] = grad_output + return result, None + + @staticmethod + def jvp(ctx, x_tangent, _): + return SelectGenVmap.apply(x_tangent, ctx.idx) + + +def sample_inputs_select(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + yield SampleInput(make_arg(3, 5), args=(2,)) + +class ScaleGradGenVmap(torch.autograd.Function): + generate_vmap_rule = True + scale = 3.14 + + @staticmethod + def forward(x): + return x.clone() + + @staticmethod + def setup_context(ctx, inputs, outputs): + pass + + @staticmethod + def backward(ctx, grad_output): + return grad_output * ScaleGradGenVmap.scale + + @staticmethod + def jvp(ctx, x_tangent): + return x_tangent * ScaleGradGenVmap.scale + +class ZeroGradientsGenVmap(torch.autograd.Function): + generate_vmap_rule = True + + @staticmethod + def forward(x, y): + return x.clone(), y.clone() + + @staticmethod + def setup_context(ctx, inputs, outputs): + pass + + @staticmethod + def backward(ctx, gx, gy): + # Intentionally 
returning torch.zeros instead of zeros_like or new_zeros. + # Also intentionally not None. + return ( + # Intentionally too-large gradient + torch.zeros(3, 4, *gx.shape, dtype=gx.dtype, device=gx.device), + torch.zeros(gy.shape, dtype=gy.dtype, device=gy.device), + ) + + @staticmethod + def jvp(ctx, gx, gy): + # Intentionally returning torch.zeros instead of zeros_like or new_zeros. + # Also intentionally not None. + return ( + torch.zeros(gx.shape, dtype=gx.dtype, device=gx.device), + torch.zeros(gy.shape, dtype=gy.dtype, device=gy.device), + ) + + +def sample_inputs_forward_default_args(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + yield SampleInput(make_arg(3, 5)) + + +class ForwardHasDefaultArgs(torch.autograd.Function): + @staticmethod + def forward(x, idx=(2,)): + return x[idx] + + @staticmethod + def setup_context(ctx, inputs, output): + x, idx = inputs + ctx.x_shape = x.shape + ctx.idx = idx + + @staticmethod + def backward(ctx, grad_output): + result = grad_output.new_zeros(ctx.x_shape) + result[ctx.idx] = grad_output + return result, None + + @staticmethod + def vmap(info, in_dims, x, idx): + x_bdim, _ = in_dims + x = x.movedim(x_bdim, 1) + return ForwardHasDefaultArgs.apply(x, idx), 0 + + @staticmethod + def jvp(ctx, x_tangent, _): + return ForwardHasDefaultArgs.apply(x_tangent, ctx.idx) + + +autograd_function_db = [ + OpInfo( + 'NumpyCubeAutogradFunction', + op=NumpyCube.apply, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_numpy_cube, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + ), + OpInfo( + 'NumpyExpMarkDirtyAutogradFunction', + op=lambda x: NumpyExp_.apply(x.clone()), + inplace_variant=NumpyExp_.apply, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_numpy_cube, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + ), + 
OpInfo( + 'NumpyMulAutogradFunction', + op=NumpyMul.apply, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_numpy_mul, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + ), + OpInfo( + 'NumpyCubeNotComposableAutogradFunction', + op=lambda x: NumpyCubeNotComposable.apply(x)[0], + supports_forward_ad=False, + supports_fwgrad_bwgrad=False, + sample_inputs_func=sample_inputs_numpy_cube, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + ), + OpInfo( + 'NumpySortAutogradFunction', + op=NumpySort.apply, + supports_forward_ad=False, + supports_fwgrad_bwgrad=False, + sample_inputs_func=sample_inputs_numpy_sort, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + gradcheck_wrapper=lambda y, ind: y, + ), + OpInfo( + 'NumpyTakeAutogradFunction', + op=NumpyTake.apply, + supports_forward_ad=False, + supports_fwgrad_bwgrad=False, + sample_inputs_func=sample_inputs_numpy_take, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + ), + OpInfo( + 'SelectAutogradFunction', + op=Select.apply, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_select, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + ), + OpInfo( + 'CubeGenVmapAutogradFunction', + op=CubeGenVmap.apply, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_numpy_cube, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + ), + OpInfo( + 'MulGenVmapAutogradFunction', + op=MulGenVmap.apply, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_numpy_mul, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + ), + OpInfo( + 'SortGenVmapAutogradFunction', + op=SortGenVmap.apply, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_numpy_sort, + dtypes=all_types_and(torch.bool, 
torch.half), + supports_out=False, + gradcheck_wrapper=lambda y, ind: y, + ), + OpInfo( + 'SelectGenVmapAutogradFunction', + op=SelectGenVmap.apply, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_select, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + ), + OpInfo( + 'ScaleGradGenVmapAutogradFunction', + op=ScaleGradGenVmap.apply, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_numpy_cube, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + ), + OpInfo( + 'ZeroGradientsGenVmapAutogradFunction', + op=ZeroGradientsGenVmap.apply, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_numpy_mul, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + ), + OpInfo( + 'ForwardHasDefaultArgsAutogradFunction', + op=ForwardHasDefaultArgs.apply, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_forward_default_args, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + ), +] diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/check_kernel_launches.py b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/check_kernel_launches.py new file mode 100644 index 0000000000000000000000000000000000000000..661614ffc80937930d4d2709adaab0669b4ebe00 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/check_kernel_launches.py @@ -0,0 +1,165 @@ +# mypy: ignore-errors + +import os +import re +import sys +from typing import List + +__all__ = [ + "check_code_for_cuda_kernel_launches", + "check_cuda_kernel_launches", +] + +# FILES TO EXCLUDE (match is done with suffix using `endswith`) +# You wouldn't drive without a seatbelt, though, so why would you +# launch a kernel without some safety? 
# Use this as a quick workaround
# for a problem with the checker, fix the checker, then de-exclude
# the files in question.
exclude_files: List[str] = []

# Without using a C++ AST we can't 100% detect kernel launches, so we
# model them as having the pattern "<<>>(arguments);"
# We then require that `C10_CUDA_KERNEL_LAUNCH_CHECK` be
# the next statement.
#
# We model the next statement as ending at the next `}` or `;`.
# If we see `}` then a clause ended (bad) if we see a semi-colon then
# we expect the launch check just before it.
#
# Since the kernel launch can include lambda statements, it's important
# to find the correct end-paren of the kernel launch. Doing this with
# pure regex requires recursive regex, which aren't part of the Python
# standard library. To avoid an additional dependency, we build a prefix
# regex that finds the start of a kernel launch, use a paren-matching
# algorithm to find the end of the launch, and then another regex to
# determine if a launch check is present.

# Finds potential starts of kernel launches
kernel_launch_start = re.compile(
    r"^.*<<<[^>]+>>>\s*\(", flags=re.MULTILINE
)

# This pattern should start at the character after the final paren of the
# kernel launch. It returns a match if the launch check is not the next statement
has_check = re.compile(
    r"\s*;(?![^;}]*C10_CUDA_KERNEL_LAUNCH_CHECK\(\);)", flags=re.MULTILINE
)


def find_matching_paren(s: str, startpos: int) -> int:
    """Given a string "prefix (unknown number of characters) suffix"
    and the position of the first `(` returns the index of the character
    1 past the `)`, accounting for paren nesting
    """
    opening = 0
    for i, c in enumerate(s[startpos:]):
        if c == '(':
            opening += 1
        elif c == ')':
            opening -= 1
            # Depth returns to zero only when the opening paren at
            # `startpos` is closed.
            if opening == 0:
                return startpos + i + 1

    raise IndexError("Closing parens not found!")


def should_exclude_file(filename) -> bool:
    """Return True iff `filename` matches one of the `exclude_files` suffixes."""
    for exclude_suffix in exclude_files:
        if filename.endswith(exclude_suffix):
            return True
    return False


def check_code_for_cuda_kernel_launches(code, filename=None):
    """Checks code for CUDA kernel launches without cuda error checks.

    Args:
        filename - Filename of file containing the code. Used only for display
                   purposes, so you can put anything here.
        code - The code to check

    Returns:
        The number of unsafe kernel launches in the code
    """
    if filename is None:
        filename = "##Python Function Call##"

    # We break the code apart and put it back together to add
    # helpful line numberings for identifying problem areas
    code = enumerate(code.split("\n"))                             # Split by line breaks
    code = [f"{lineno}: {linecode}" for lineno, linecode in code]  # Number the lines
    code = '\n'.join(code)                                         # Put it back together

    num_launches_without_checks = 0
    for m in kernel_launch_start.finditer(code):
        end_paren = find_matching_paren(code, m.end() - 1)
        if has_check.match(code, end_paren):
            num_launches_without_checks += 1
            context = code[m.start():end_paren + 1]
            # BUG FIX: report the actual `filename` instead of the literal
            # placeholder string '(unknown)', so the diagnostic identifies
            # which file is missing the launch check.
            print(f"Missing C10_CUDA_KERNEL_LAUNCH_CHECK in '{filename}'. Context:\n{context}", file=sys.stderr)

    return num_launches_without_checks


def check_file(filename):
    """Checks a file for CUDA kernel launches without cuda error checks

    Args:
        filename - File to check

    Returns:
        The number of unsafe kernel launches in the file
    """
    # Only CUDA sources can contain kernel launches.
    if not (filename.endswith((".cu", ".cuh"))):
        return 0
    if should_exclude_file(filename):
        return 0
    with open(filename) as fo:
        contents = fo.read()
        unsafeCount = check_code_for_cuda_kernel_launches(contents, filename)
    return unsafeCount


def check_cuda_kernel_launches():
    """Checks all pytorch code for CUDA kernel launches without cuda error checks

    Returns:
        The number of unsafe kernel launches in the codebase
    """
    torch_dir = os.path.dirname(os.path.realpath(__file__))
    torch_dir = os.path.dirname(torch_dir)  # Go up to parent torch
    torch_dir = os.path.dirname(torch_dir)  # Go up to parent caffe2

    kernels_without_checks = 0
    files_without_checks = []
    for root, dirnames, filenames in os.walk(torch_dir):
        # `$BASE/build` and `$BASE/torch/include` are generated
        # so we don't want to flag their contents
        if root == os.path.join(torch_dir, "build") or root == os.path.join(torch_dir, "torch/include"):
            # Curtail search by modifying dirnames and filenames in place
            # Yes, this is the way to do this, see `help(os.walk)`
            dirnames[:] = []
            continue

        for x in filenames:
            filename = os.path.join(root, x)
            file_result = check_file(filename)
            if file_result > 0:
                kernels_without_checks += file_result
                files_without_checks.append(filename)

    if kernels_without_checks > 0:
        count_str = f"Found {kernels_without_checks} instances in " \
                    f"{len(files_without_checks)} files where kernel " \
                    "launches didn't have checks."
+ print(count_str, file=sys.stderr) + print("Files without checks:", file=sys.stderr) + for x in files_without_checks: + print(f"\t{x}", file=sys.stderr) + print(count_str, file=sys.stderr) + + return kernels_without_checks + + +if __name__ == "__main__": + unsafe_launches = check_cuda_kernel_launches() + sys.exit(0 if unsafe_launches == 0 else 1) diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/codegen/__init__.py b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/codegen/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1e3572cfc4c6a0ddc3d8fa2e1b056415204acdfa --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/codegen/__init__.py @@ -0,0 +1 @@ +# mypy: ignore-errors diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/codegen/__pycache__/__init__.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/codegen/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..83c5a52a316ccb990c725b30af606aff7bf3f5ef Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/codegen/__pycache__/__init__.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/common_dist_composable.py b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/common_dist_composable.py new file mode 100644 index 0000000000000000000000000000000000000000..e7bce5c37f3d95fc664b50764e9b9756b89c6cd0 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/common_dist_composable.py @@ -0,0 +1,111 @@ +# mypy: ignore-errors + +# Owner(s): ["oncall: distributed"] + +from typing import Tuple + +import torch +import torch.nn as nn + + +class UnitModule(nn.Module): + def __init__(self, device: torch.device): + super().__init__() + self.l1 = nn.Linear(100, 100, device=device) + 
        # Small ReLU/Linear/ReLU stack sandwiched between l1 and l2.
        self.seq = nn.Sequential(
            nn.ReLU(),
            nn.Linear(100, 100, device=device),
            nn.ReLU(),
        )
        self.l2 = nn.Linear(100, 100, device=device)

    def forward(self, x):
        # l1 -> seq -> l2
        return self.l2(self.seq(self.l1(x)))


class CompositeModel(nn.Module):
    """Composite module: linear layer, two ``UnitModule``s, final linear layer."""

    def __init__(self, device: torch.device):
        super().__init__()
        self.l1 = nn.Linear(100, 100, device=device)
        self.u1 = UnitModule(device)
        self.u2 = UnitModule(device)
        self.l2 = nn.Linear(100, 100, device=device)

    def forward(self, x):
        return self.l2(self.u2(self.u1(self.l1(x))))


class UnitParamModule(nn.Module):
    """Like ``UnitModule`` but also owns a raw ``nn.Parameter`` used in ``forward``."""

    def __init__(self, device: torch.device):
        super().__init__()
        self.l = nn.Linear(100, 100, device=device)
        self.seq = nn.Sequential(
            nn.ReLU(),
            nn.Linear(100, 100, device=device),
            nn.ReLU(),
        )
        # Direct parameter (not wrapped in a layer) multiplied in forward.
        self.p = nn.Parameter(torch.randn((100, 100), device=device))

    def forward(self, x):
        return torch.mm(self.seq(self.l(x)), self.p)


class CompositeParamModel(nn.Module):
    """Composite module with submodules, a raw parameter, and a persistent buffer."""

    def __init__(self, device: torch.device):
        super().__init__()
        self.l = nn.Linear(100, 100, device=device)
        self.u1 = UnitModule(device)
        self.u2 = UnitModule(device)
        self.p = nn.Parameter(torch.randn((100, 100), device=device))
        # Persistent buffer: included in state_dict but not a parameter.
        self.register_buffer(
            "buffer", torch.randn((100, 100), device=device), persistent=True
        )

    def forward(self, x):
        a = self.u2(self.u1(self.l(x)))
        b = self.p
        return torch.mm(a, b)


class FakeSequential(nn.Module):
    # Define this class to achieve a desired nested wrapping using the module
    # wrap policy with `nn.Sequential`
    # NOTE: modules are stored in a plain Python list rather than an
    # nn.ModuleList, so they are not registered as submodules of this wrapper.
    def __init__(self, *modules: Tuple[nn.Module, ...]) -> None:
        super().__init__()
        self._module_sequence = list(modules)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Apply each stored module in order, like nn.Sequential.
        for module in self._module_sequence:
            x = module(x)
        return x


class NestedSequentialModel(nn.Module):
    def __init__(self, device: torch.device) -> None:
        super().__init__()
        # This nested structure exercises traversal order to catch differences
        # between valid
traversals (e.g. BFS and DFS variations). + self.seq1 = nn.Sequential( + nn.Linear(1, 1, device=device), + FakeSequential( + nn.Linear(1, 1, device=device), + nn.ReLU(), + FakeSequential( + nn.Linear(1, 1, device=device), + ), + nn.ReLU(), + ), + nn.Linear(1, 2, device=device), + ) + self.lin = nn.Linear(2, 2, device=device) + self.seq2 = nn.Sequential( + nn.ReLU(), + nn.Linear(2, 3, device=device), + FakeSequential( + nn.Linear(3, 2, bias=False, device=device), + nn.Linear(2, 4, bias=False, device=device), + ), + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + return self.seq2(self.lin(self.seq1(x))) diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/common_distributed.py b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/common_distributed.py new file mode 100644 index 0000000000000000000000000000000000000000..8568094dc40def4a0ce3933f5a197c5a334a8f89 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/common_distributed.py @@ -0,0 +1,1422 @@ +# mypy: ignore-errors + +import abc +import faulthandler +import itertools +import logging +import multiprocessing +import os +import queue +import subprocess +import sys +import tempfile +import threading +import time +import traceback +import types +import unittest +from contextlib import contextmanager +from dataclasses import dataclass +from datetime import timedelta +from enum import Enum +from functools import partial, reduce, wraps +from io import StringIO +from typing import Dict, NamedTuple, Optional, Union, List, Any, Callable, Tuple +from unittest.mock import patch + +import torch +import torch._dynamo.test_case +import torch.cuda.nccl +import torch.distributed as c10d +import torch.nn as nn +from torch.testing._internal.common_utils import ( + FILE_SCHEMA, + find_free_port, + IS_SANDCASTLE, + retry_on_connect_failures, + skip_but_pass_in_sandcastle, + skip_but_pass_in_sandcastle_if, + TEST_WITH_ROCM, + TEST_WITH_TSAN, + 
    TestCase,
    run_tests,
)
from torch.testing._internal.distributed.multi_threaded_pg import (
    _install_threaded_pg,
    _uninstall_threaded_pg,
    ProcessLocalGroup,
)
import operator

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


class TestSkip(NamedTuple):
    # Exit code a subprocess uses to signal this skip condition.
    exit_code: int
    # Human-readable reason shown when the skip is reported.
    message: str


# Registry of well-known skip conditions, keyed by short name. Subprocesses
# call sys.exit(skip.exit_code) and the parent maps the code back to a reason.
TEST_SKIPS = {
    "backend_unavailable": TestSkip(
        72, "Skipped because distributed backend is not available."
    ),
    "small_worldsize": TestSkip(73, "Skipped due to small world size."),
    "odd_worldsize": TestSkip(87, "Skipped due to odd world size."),
    "no_cuda": TestSkip(74, "CUDA is not available."),
    "multi-gpu-1": TestSkip(75, "Need at least 1 CUDA device"),
    "multi-gpu-2": TestSkip(77, "Need at least 2 CUDA devices"),
    "multi-gpu-3": TestSkip(80, "Need at least 3 CUDA devices"),
    "multi-gpu-4": TestSkip(81, "Need at least 4 CUDA devices"),
    "multi-gpu-5": TestSkip(82, "Need at least 5 CUDA devices"),
    "multi-gpu-6": TestSkip(83, "Need at least 6 CUDA devices"),
    "multi-gpu-7": TestSkip(84, "Need at least 7 CUDA devices"),
    "multi-gpu-8": TestSkip(85, "Need at least 8 CUDA devices"),
    "nccl": TestSkip(76, "c10d not compiled with NCCL support"),
    "skipIfRocm": TestSkip(78, "Test skipped for ROCm"),
    "no_peer_access": TestSkip(79, "Test skipped because no GPU peer access"),
    "generic": TestSkip(
        86, "Test skipped at subprocess level, look at subprocess log for skip reason"
    ),
    "importerror": TestSkip(88, "Test skipped due to missing import"),
}


@dataclass
class DistTestCases:
    # Backends that do not support a specific collective
    skip_collective = {}
    skip_collective["allgather_coalesced"] = {"nccl", "mpi", "ucc"}
    skip_collective["reduce"] = set()
    skip_collective["sendrecv anysource"] = {"nccl", "ucc"}
    skip_collective["cpu barrier"] = {"nccl", "ucc"}

    # Sets showing that something is implemented
    backend_feature = {}
    backend_feature["gpu"] = {"nccl", "gloo", "ucc"}
    backend_feature["cuda"] 
= {"nccl", "gloo", "ucc"}
    backend_feature["ddp"] = {"nccl", "gloo", "ucc"}
    backend_feature["subgroup"] = {"nccl", "gloo", "ucc"}
    backend_feature["plugin"] = set()


def skip_if_no_gpu(func):
    """Skips if the world size exceeds the number of GPUs, ensuring that if the
    test is run, each rank has its own GPU via ``torch.cuda.device(rank)``."""

    @wraps(func)
    def wrapper(*args, **kwargs):
        if not torch.cuda.is_available():
            sys.exit(TEST_SKIPS["no_cuda"].exit_code)
        world_size = int(os.environ["WORLD_SIZE"])
        if torch.cuda.device_count() < world_size:
            # Exit with the skip code matching the required GPU count.
            sys.exit(TEST_SKIPS[f"multi-gpu-{world_size}"].exit_code)

        return func(*args, **kwargs)

    return wrapper


def skip_if_small_worldsize(func):
    # Skip (via process exit code) when WORLD_SIZE <= 2, except for MPI.
    @wraps(func)
    def wrapper(*args, **kwargs):
        if (os.environ["BACKEND"] != "mpi") and int(os.environ["WORLD_SIZE"]) <= 2:
            sys.exit(TEST_SKIPS["small_worldsize"].exit_code)

        return func(*args, **kwargs)

    return wrapper


def skip_if_odd_worldsize(func):
    # Skip when WORLD_SIZE is odd, except for MPI.
    @wraps(func)
    def wrapper(*args, **kwargs):
        if (os.environ["BACKEND"] != "mpi") and int(os.environ["WORLD_SIZE"]) % 2 == 1:
            sys.exit(TEST_SKIPS["odd_worldsize"].exit_code)

        return func(*args, **kwargs)

    return wrapper


def require_n_gpus_for_nccl_backend(n, backend):
    # Skip NCCL tests that need at least `n` GPUs; other backends run as-is.
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            if backend == "nccl" and torch.cuda.device_count() < n:
                sys.exit(TEST_SKIPS[f"multi-gpu-{n}"].exit_code)
            else:
                return func(*args, **kwargs)

        return wrapper

    return decorator


def import_transformers_or_skip():
    # Skip (exit code "importerror") when HuggingFace transformers is absent.
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            try:
                from transformers import (  # noqa: F401
                    AutoModelForMaskedLM,
                    BertConfig,
                )

                return func(*args, **kwargs)
            except ImportError:
                sys.exit(TEST_SKIPS["importerror"].exit_code)

        return wrapper

    return decorator


def at_least_x_gpu(x):
    # True iff CUDA is available with at least `x` devices.
    return torch.cuda.is_available() and torch.cuda.device_count() >= x


def skip_if_lt_x_gpu(x):
    # Run the test only when >= x GPUs are present; otherwise exit with the
    # matching "multi-gpu-x" skip code.
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            if torch.cuda.is_available() and torch.cuda.device_count() >= x:
                return func(*args, **kwargs)
            sys.exit(TEST_SKIPS[f"multi-gpu-{x}"].exit_code)

        return wrapper

    return decorator


# This decorator helps avoiding initializing cuda while testing other backends
def nccl_skip_if_lt_x_gpu(backend, x):
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            # Non-NCCL backends run unconditionally (no CUDA query needed).
            if backend != "nccl":
                return func(*args, **kwargs)
            if torch.cuda.is_available() and torch.cuda.device_count() >= x:
                return func(*args, **kwargs)
            sys.exit(TEST_SKIPS[f"multi-gpu-{x}"].exit_code)

        return wrapper

    return decorator


def verify_ddp_error_logged(model_DDP, err_substr):
    # Verify error was logged in ddp_logging_data.
    ddp_logging_data = model_DDP._get_ddp_logging_data()
    assert "iteration" in ddp_logging_data
    assert "has_error" in ddp_logging_data
    assert "error" in ddp_logging_data
    logging_err = ddp_logging_data["error"]
    # Remove C++ stacktrace if needed.
    actual = (
        err_substr
        if err_substr.find("\nException raised from ") == -1
        else err_substr.split("\nException raised from ")[0]
    )
    assert (
        actual in logging_err
    ), f"Did not find expected {actual} in ddp logging data error: {logging_err}"


def with_nccl_blocking_wait(func):
    """
    Convenience decorator to set/unset TORCH_NCCL_BLOCKING_WAIT flag. Note that use of
    this decorator will override the setting of TORCH_NCCL_ASYNC_ERROR_HANDLING for
    the particular test. After the test, both TORCH_NCCL_BLOCKING_WAIT and
    TORCH_NCCL_ASYNC_ERROR_HANDLING will be restored to their original values.
+ """ + + @wraps(func) + def wrapper(*args, **kwargs): + # Save and unset TORCH_NCCL_ASYNC_ERROR_HANDLING + try: + cached_nccl_async_error_handling: Union[str, None] = os.environ[ + "TORCH_NCCL_ASYNC_ERROR_HANDLING" + ] + del os.environ["TORCH_NCCL_ASYNC_ERROR_HANDLING"] + except KeyError: + # TORCH_NCCL_ASYNC_ERROR_HANDLING was unset + cached_nccl_async_error_handling = None + + # Save val of TORCH_NCCL_BLOCKING_WAIT and set it. + try: + cached_nccl_blocking_wait: Union[str, None] = os.environ[ + "TORCH_NCCL_BLOCKING_WAIT" + ] + except KeyError: + cached_nccl_blocking_wait = None + finally: + os.environ["TORCH_NCCL_BLOCKING_WAIT"] = "1" + + try: + ret = func(*args, **kwargs) + return ret + finally: + # restore old values. + if cached_nccl_async_error_handling is not None: + os.environ[ + "TORCH_NCCL_ASYNC_ERROR_HANDLING" + ] = cached_nccl_async_error_handling + + if cached_nccl_blocking_wait is not None: + os.environ["TORCH_NCCL_BLOCKING_WAIT"] = cached_nccl_blocking_wait + + return wrapper + + +def with_dist_debug_levels(levels): + """ + Runs a test for each distributed debug level specified in levels. + """ + + def decorator(func): + @wraps(func) + def wrapper(*args, **kwargs): + old_level = os.environ.get("TORCH_DISTRIBUTED_DEBUG", None) + for level in levels: + os.environ["TORCH_DISTRIBUTED_DEBUG"] = level + c10d.set_debug_level_from_env() + ret = func(*args, **kwargs) + c10d.barrier() + if old_level is not None: + os.environ["TORCH_DISTRIBUTED_DEBUG"] = old_level + # Only returns test return for last test, but since these are + # unittests the return value is not really used and earlier tests + # would've raised had they failed. 
+ return ret + + return wrapper + + return decorator + + +def requires_gloo(): + return skip_but_pass_in_sandcastle_if( + not c10d.is_gloo_available(), + "c10d was not compiled with the Gloo backend", + ) + + +def requires_nccl_version(version, msg): + if not c10d.is_nccl_available(): + return skip_but_pass_in_sandcastle( + "c10d was not compiled with the NCCL backend", + ) + else: + return skip_but_pass_in_sandcastle_if( + torch.cuda.nccl.version() < version, + f"Requires NCCL version greater than or equal to: {version}, found: {torch.cuda.nccl.version()}, reason: {msg}", + ) + + +def requires_nccl(): + return skip_but_pass_in_sandcastle_if( + not c10d.is_nccl_available(), + "c10d was not compiled with the NCCL backend", + ) + +def requires_ucc(): + return skip_but_pass_in_sandcastle_if( + not c10d.is_ucc_available(), + "c10d was not compiled with the UCC backend", + ) + +def requires_mpi(): + return skip_but_pass_in_sandcastle_if( + not c10d.is_mpi_available(), + "c10d was not compiled with the MPI backend", + ) + + +def skip_if_rocm_multiprocess(func): + """Skips a test for ROCm""" + func.skip_if_rocm_multiprocess = True + + @wraps(func) + def wrapper(*args, **kwargs): + if not TEST_WITH_ROCM: + return func(*args, **kwargs) + sys.exit(TEST_SKIPS["skipIfRocm"].exit_code) + + return wrapper + + +def skip_if_win32(): + return skip_but_pass_in_sandcastle_if( + sys.platform == "win32", + "This unit test case is not supported on Windows platform", + ) + + +@retry_on_connect_failures +def create_tcp_store( + addr="localhost", + world_size=1, + is_master=True, + timeout=timedelta(minutes=5), + wait_for_workers=True, + jit_class=False, + use_libuv=True, +): + """ + Creates a TCP store. Retries if the chosen port is already in use. 
+ """ + port = find_free_port() + if jit_class: + timeout_millisecond = int(timeout / timedelta(milliseconds=1)) + return torch.classes.dist_c10d.TCPStore( + addr, port, world_size, is_master, timeout_millisecond + ) + else: + return c10d.TCPStore( + addr, port, world_size, is_master, wait_for_workers=wait_for_workers, use_libuv=use_libuv + ) + + +if TEST_WITH_TSAN: + # TSAN runs much slower. + TIMEOUT_DEFAULT = 500 +else: + TIMEOUT_DEFAULT = int(os.getenv('DISTRIBUTED_TESTS_DEFAULT_TIMEOUT', '300')) +TIMEOUT_OVERRIDE = {"test_ddp_uneven_inputs": 400} + + +# https://github.com/pytorch/pytorch/issues/75665 +if TEST_WITH_ROCM: + TIMEOUT_OVERRIDE["test_join_kwargs"] = 200 + + +def create_device(interface=None): + if sys.platform == "win32" or interface is None: + return c10d.ProcessGroupGloo.create_device(hostname="127.0.0.1") + else: + return c10d.ProcessGroupGloo.create_device(interface=interface) + + +def get_timeout(test_id) -> int: + return TIMEOUT_OVERRIDE.get(test_id.split(".")[-1], TIMEOUT_DEFAULT) + + +@contextmanager +def captured_output(): + new_out, new_err = StringIO(), StringIO() + old_out, old_err = sys.stdout, sys.stderr + try: + sys.stdout, sys.stderr = new_out, new_err + yield sys.stdout, sys.stderr + finally: + sys.stdout, sys.stderr = old_out, old_err + + +def simple_sparse_reduce_tests(rank: int, world_size: int, num_inputs: int = 1): + """ + Generate a number of basic test cases for sparse reduction. + These cover tensors with a varying number of sparse dimensions and a varying + number of dense dimensions. The only reduction operation we support is sum. + """ + + def generate(rank: int, world_size: int, sparse_dims: int = 1, dense_dims: int = 0): + # First sparse dimension is [0..rank]. + # Subsequent dimensions are always 0, so we know there is + # a non-empty intersection between any two sparse tensors. 
+ indices = torch.reshape(torch.arange(rank + 1), (1, rank + 1)) + shape = [world_size] + [2 for _ in range(dense_dims)] + for _ in range(sparse_dims - 1): + indices = torch.cat((indices, torch.zeros(1, rank + 1))) + shape.append(world_size) + values = torch.ones([rank + 1] + [2 for _ in range(dense_dims)]) + return torch.sparse_coo_tensor(indices, values, shape) + + def compute_sum(fn, world_size: int): + return reduce( + operator.add, [fn(rank, world_size) for rank in range(world_size)] + ) + + return [ + ( + [ + fn(num_inputs * rank + i, num_inputs * world_size) + for i in range(num_inputs) + ], + [compute_sum(fn, num_inputs * world_size) for i in range(num_inputs)], + ) + for fn in [ + partial(generate, sparse_dims=1), + partial(generate, sparse_dims=2), + partial(generate, sparse_dims=3), + partial(generate, dense_dims=1), + partial(generate, dense_dims=2), + partial(generate, dense_dims=3), + ] + ] + + +# HELPER FOR MULTIGPU TESTS +def init_multigpu_helper(world_size: int, backend: str): + """Multigpu tests are designed to simulate the multi nodes with multi + GPUs on each node. Nccl backend requires equal #GPUs in each process. + On a single node, all visible GPUs are evenly + divided to subsets, each process only uses a subset. + """ + nGPUs = torch.cuda.device_count() + visible_devices = range(nGPUs) + + # If rank is less than or equal to number of available GPU's + # then each rank can be mapped to corresponding GPU. 
+ nGPUs_per_process = 1 + if world_size > nGPUs: + nGPUs_per_process = nGPUs // world_size + rank_to_GPU = { + i: list(visible_devices[i * nGPUs_per_process : (i + 1) * nGPUs_per_process]) + for i in range(world_size) + } + return rank_to_GPU + + +tmp_dir: Optional[tempfile.TemporaryDirectory] = None + + +def initialize_temp_directories(init_method: Optional[str] = None) -> None: + global tmp_dir + tmp_dir = tempfile.TemporaryDirectory() + os.environ["TEMP_DIR"] = tmp_dir.name + os.mkdir(os.path.join(tmp_dir.name, "barrier")) + os.mkdir(os.path.join(tmp_dir.name, "test_dir")) + init_dir_path = os.path.join(tmp_dir.name, "init_dir") + os.mkdir(init_dir_path) + # Set init method if specified. + if init_method is not None: + os.environ["INIT_METHOD"] = init_method + else: + os.environ["INIT_METHOD"] = FILE_SCHEMA + os.path.join( + init_dir_path, "shared_init_file" + ) + + +def cleanup_temp_dir() -> None: + if tmp_dir is not None: + tmp_dir.cleanup() + + +# Most tests operate with this worldsize +DEFAULT_WORLD_SIZE = 4 + +# [How does MultiProcessTestCase work?] +# Each MultiProcessTestCase instance uses 1 + `world_size()` processes, by +# default `world_size()` returns 4. Let's take `test_rpc_spawn.py` as an +# example which inherits from this class. Its `Setup()` methods calls into +# `MultiProcessTestCase._spawn_processes()` which spawns `world_size()` +# subprocesses. During the spawn, the main process passes the test name to +# subprocesses, and the name is acquired from self.id(). The subprocesses +# then use the provided test function name to retrieve the function attribute +# from the test instance and run it. The main process simply waits for all +# subprocesses to join. + + +class MultiProcessTestCase(TestCase): + MAIN_PROCESS_RANK = -1 + # This exit code is used to indicate that the test code had an error and + # exited abnormally. 
There are certain tests that might use sys.exit() to + # simulate failures and in those cases, we can't have an exit code of 0, + # but we still want to ensure we didn't run into any other errors. + TEST_ERROR_EXIT_CODE = 10 + + # do not early terminate for distributed tests. + def _should_stop_test_suite(self) -> bool: + return False + + @property + def world_size(self) -> int: + return DEFAULT_WORLD_SIZE + + def join_or_run(self, fn): + @wraps(fn) + def wrapper(self): + if self.rank == self.MAIN_PROCESS_RANK: + self._join_processes(fn) + else: + fn() + + return types.MethodType(wrapper, self) + + # The main process spawns N subprocesses that run the test. + # Constructor patches current instance test method to + # assume the role of the main process and join its subprocesses, + # or run the underlying test function. + def __init__(self, method_name: str = "runTest", methodName: str = "runTest") -> None: + # methodName is the correct naming in unittest and testslide uses keyword arguments. + # So we need to use both to 1) not break BC and, 2) support testslide. + if methodName != "runTest": + method_name = methodName + super().__init__(method_name) + fn = getattr(self, method_name) + setattr(self, method_name, self.join_or_run(fn)) + + def setUp(self) -> None: + super().setUp() + self.skip_return_code_checks = [] # type: ignore[var-annotated] + self.processes = [] # type: ignore[var-annotated] + self.rank = self.MAIN_PROCESS_RANK + self.file_name = tempfile.NamedTemporaryFile(delete=False).name + # pid to pipe consisting of error message from process. + self.pid_to_pipe = {} # type: ignore[var-annotated] + + def tearDown(self) -> None: + super().tearDown() + for p in self.processes: + p.terminate() + # Each Process instance holds a few open file descriptors. The unittest + # runner creates a new TestCase instance for each test method and keeps + # it alive until the end of the entire suite. 
We must thus reset the + # processes to prevent an effective file descriptor leak. + self.processes = [] + + def _current_test_name(self) -> str: + # self.id() == e.g. '__main__.TestDistributed.TestAdditive.test_get_rank' + return self.id().split(".")[-1] + + def _start_processes(self, proc) -> None: + self.processes = [] + for rank in range(int(self.world_size)): + parent_conn, child_conn = torch.multiprocessing.Pipe() + process = proc( + target=self.__class__._run, + name="process " + str(rank), + args=(rank, self._current_test_name(), self.file_name, child_conn), + kwargs={ + "fake_pg": getattr(self, "fake_pg", False), + } + ) + process.start() + logger.info("Started process %s with pid %s", rank, process.pid) + self.pid_to_pipe[process.pid] = parent_conn + self.processes.append(process) + + def _spawn_processes(self) -> None: + proc = torch.multiprocessing.get_context("spawn").Process + self._start_processes(proc) + + class Event(Enum): + GET_TRACEBACK = 1 + + @staticmethod + def _event_listener(parent_pipe, signal_pipe, rank: int): + logger.info("Starting event listener thread for rank %s", rank) + while True: + ready_pipes = multiprocessing.connection.wait([parent_pipe, signal_pipe]) + + if parent_pipe in ready_pipes: + + if parent_pipe.closed: + logger.info( + "Pipe closed for process %s, stopping event listener thread", rank + ) + return + + event = parent_pipe.recv() + logger.info("Received event %s on process %s", event, rank) + + if event == MultiProcessTestCase.Event.GET_TRACEBACK: + # Return traceback to the parent process. 
+ with tempfile.NamedTemporaryFile(mode="r+") as tmp_file: + faulthandler.dump_traceback(tmp_file) + # Flush buffers and seek to read from the beginning + tmp_file.flush() + tmp_file.seek(0) + parent_pipe.send(tmp_file.read()) + + logger.info("Process %s sent traceback", rank) + + if signal_pipe in ready_pipes: + return + + @classmethod + def _run(cls, rank: int, test_name: str, file_name: str, parent_pipe, **kwargs) -> None: + self = cls(test_name) + self.rank = rank + self.file_name = file_name + self.run_test(test_name, parent_pipe) + + def run_test(self, test_name: str, parent_pipe) -> None: + # Start event listener thread. + signal_recv_pipe, signal_send_pipe = torch.multiprocessing.Pipe(duplex=False) + event_listener_thread = threading.Thread( + target=MultiProcessTestCase._event_listener, + args=(parent_pipe, signal_recv_pipe, self.rank), + daemon=True, + ) + event_listener_thread.start() + if sys.platform != "win32" and sys.platform != "darwin": + # Register signal handler to dump stack traces on FATALs. + # Windows and MacOS do not support the signal handlers. + torch._C._set_print_stack_traces_on_fatal_signal(True) + # Show full C++ stacktraces when a Python error originating from C++ is raised. + os.environ["TORCH_SHOW_CPP_STACKTRACES"] = "1" + + # self.id() == e.g. '__main__.TestDistributed.test_get_rank' + # We're retrieving a corresponding test and executing it. + try: + getattr(self, test_name)() + except unittest.SkipTest as se: + logger.info( + "Process %s skipping test %s for following reason: %s", self.rank, test_name, str(se) + ) + sys.exit(TEST_SKIPS["generic"].exit_code) + except Exception as e: + logger.error( + "Caught exception: \n%s exiting " + "process %s with exit code: %s", + traceback.format_exc(), self.rank, MultiProcessTestCase.TEST_ERROR_EXIT_CODE + ) + # Send error to parent process. 
            parent_pipe.send(traceback.format_exc())
            sys.exit(MultiProcessTestCase.TEST_ERROR_EXIT_CODE)
        finally:
            if signal_send_pipe is not None:
                # Wake the event-listener thread so it can observe the signal
                # pipe and exit cleanly.
                signal_send_pipe.send(None)

            assert event_listener_thread is not None
            event_listener_thread.join()
            # Close pipe after done with test.
            parent_pipe.close()

    def _get_timedout_process_traceback(self) -> None:
        # Ask every still-running subprocess (exitcode is None) for its
        # traceback over its pipe, then collect and log the replies.
        pipes = []
        for i, process in enumerate(self.processes):
            if process.exitcode is None:
                pipe = self.pid_to_pipe[process.pid]
                try:
                    pipe.send(MultiProcessTestCase.Event.GET_TRACEBACK)
                    pipes.append((i, pipe))
                except ConnectionError as e:
                    logger.error(
                        "Encountered error while trying to get traceback for process %s: %s", i, e
                    )

        # Wait for results.
        for rank, pipe in pipes:
            try:
                # Wait for traceback
                if pipe.poll(5):
                    if pipe.closed:
                        logger.info(
                            "Pipe closed for process %s, cannot retrieve traceback", rank
                        )
                        continue

                    traceback = pipe.recv()
                    logger.error(
                        "Process %s timed out with traceback: \n\n%s", rank, traceback
                    )
                else:
                    logger.error(
                        "Could not retrieve traceback for timed out process: %s", rank
                    )
            except ConnectionError as e:
                logger.error(
                    "Encountered error while trying to get traceback for process %s: %s", rank, e
                )

    def _join_processes(self, fn) -> None:
        # Main-process loop: poll subprocesses until all exit, one errors out
        # early, or the per-test timeout elapses; then validate exit codes.
        timeout = get_timeout(self.id())
        start_time = time.time()
        subprocess_error = False
        try:
            while True:
                # check to see if any subprocess exited with an error early.
                for (i, p) in enumerate(self.processes):
                    # This is the exit code processes exit with if they
                    # encountered an exception.
                    if p.exitcode == MultiProcessTestCase.TEST_ERROR_EXIT_CODE:
                        print(
                            f"Process {i} terminated with exit code {p.exitcode}, terminating remaining processes."
+ ) + active_children = torch.multiprocessing.active_children() + for ac in active_children: + ac.terminate() + subprocess_error = True + break + if subprocess_error: + break + # All processes have joined cleanly if they all a valid exitcode + if all(p.exitcode is not None for p in self.processes): + break + # Check if we should time out the test. If so, we terminate each process. + elapsed = time.time() - start_time + if elapsed > timeout: + self._get_timedout_process_traceback() + print( + f"Timing out after {timeout} seconds and killing subprocesses." + ) + for p in self.processes: + p.terminate() + break + # Sleep to avoid excessive busy polling. + time.sleep(0.1) + + elapsed_time = time.time() - start_time + + if fn in self.skip_return_code_checks: + self._check_no_test_errors(elapsed_time) + else: + self._check_return_codes(elapsed_time) + finally: + # Close all pipes + for pipe in self.pid_to_pipe.values(): + pipe.close() + + def _check_no_test_errors(self, elapsed_time) -> None: + """ + Checks that we didn't have any errors thrown in the child processes. + """ + for i, p in enumerate(self.processes): + if p.exitcode is None: + raise RuntimeError( + f"Process {i} timed out after {elapsed_time} seconds" + ) + self.assertNotEqual(self.TEST_ERROR_EXIT_CODE, p.exitcode) + + def _check_return_codes(self, elapsed_time) -> None: + """ + Checks that the return codes of all spawned processes match, and skips + tests if they returned a return code indicating a skipping condition. + """ + # If no processes are spawned, there is nothing to check. + if not self.processes: + logger.warning("Note: no subprocesses were spawned, test was likely skipped.") + return + + first_process = self.processes[0] + # first, we check if there are errors in actual processes + # (via TEST_ERROR_EXIT CODE), and raise an exception for those. 
+ # the reason we do this is to attempt to raise a more helpful error + # message than "Process x terminated/timed out" + # TODO: we should pipe the exception of the failed subprocess here. + # Currently, the actual exception is displayed as a logging output. + errored_processes = [ + (i, p) + for i, p in enumerate(self.processes) + if p.exitcode == MultiProcessTestCase.TEST_ERROR_EXIT_CODE + ] + if errored_processes: + error = "" + for i, process in errored_processes: + # Get error from pipe. + error_message = self.pid_to_pipe[process.pid].recv() + error += ( + f"Process {i} exited with error code {MultiProcessTestCase.TEST_ERROR_EXIT_CODE} " + f"and exception:\n{error_message}\n" + ) + + raise RuntimeError(error) + # If no process exited uncleanly, we check for timeouts, and then ensure + # each process exited cleanly. + for i, p in enumerate(self.processes): + if p.exitcode is None: + raise RuntimeError( + f"Process {i} terminated or timed out after {elapsed_time} seconds" + ) + self.assertEqual( + p.exitcode, + first_process.exitcode, + msg=f"Expect process {i} exit code to match Process 0 exit code of {first_process.exitcode}, but got {p.exitcode}", + ) + for skip in TEST_SKIPS.values(): + if first_process.exitcode == skip.exit_code: + if IS_SANDCASTLE: + # Don't use unittest.skip to skip the test on sandcastle + # since it creates tasks for skipped tests assuming there + # is some follow-up needed. Instead just "pass" the test + # with an appropriate message. 
+ logger.info( + "Skipping %s on sandcastle for the following reason: %s", self.id(), skip.message + ) + return + else: + raise unittest.SkipTest(skip.message) + self.assertEqual( + first_process.exitcode, + 0, + msg=f"Expected zero exit code but got {first_process.exitcode} for pid: {first_process.pid}", + ) + + @property + def is_master(self) -> bool: + return self.rank == 0 + + +def run_subtests( + cls_inst, + subtest_config: Dict[str, List[Any]], + test_fn: Callable, + *test_args, + **test_kwargs: Any, +): + """ + Runs a test function given by ``test_fn`` as a subtest according to the + configurations specified by ``subtest_config``. This amortizes the + costly setup overhead (including process spawn and initializing the + process group) over the subtests. + + Args: + subtest_config (Dict[str, List[Any]]): A mapping from subtest + keyword argument name to a list of its possible values. + test_fn (Callable): A callable that runs the actual test. + test_args: Positional arguments to pass to ``test_fn``. + test_kwargs: Keyword arguments to pass to ``test_fn``. 
+ """ + # Convert the config mapping to a list to have a fixed order + subtest_config_items: List[Tuple[str, List[Any]]] = list(subtest_config.items()) + subtest_config_keys: List[str] = [item[0] for item in subtest_config_items] + subtest_config_values: List[List[Any]] = [item[1] for item in subtest_config_items] + for values in itertools.product(*subtest_config_values): + # Map keyword to chosen value + subtest_kwargs = dict(zip(subtest_config_keys, values)) + with cls_inst.subTest(**subtest_kwargs): + torch._dynamo.reset() + test_fn(*test_args, **test_kwargs, **subtest_kwargs) + torch._dynamo.reset() + c10d.barrier() + + +# Cannot use functools.cache as it requires python 3.9 +EFA_PROBE_RESULT = None + + +def has_efa() -> bool: + """ + If shell command `fi_info -p efa -t FI_EP_RDM` returns exit code 0 then we assume that the machine has + Libfabric EFA interfaces and EFA software components installed, + see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/efa-start.html. + """ + global EFA_PROBE_RESULT + if EFA_PROBE_RESULT is not None: + return EFA_PROBE_RESULT + + try: + EFA_PROBE_RESULT = ( + subprocess.run(["fi_info", "-p", "efa", "-t", "FI_EP_RDM"], check=False).returncode == 0 + ) + except FileNotFoundError: + EFA_PROBE_RESULT = False + return EFA_PROBE_RESULT + + +def tp_transports(): + """ + If the machine has Libfabric EFA interfaces and EFA software components installed it may cause + 'RuntimeError: In operator() at tensorpipe/common/ibv.h:172 "": Operation not supported' if tensorpipe + uses InfiniBand transport, so we exclude it from tensorpipe transports, + see https://github.com/pytorch/pytorch/issues/73885 and https://github.com/pytorch/pytorch/issues/65022 + """ + return ["shm", "uv"] if has_efa() else None + + +def spawn_threads_and_init_comms( + func=None, timeout=TIMEOUT_DEFAULT, world_size=DEFAULT_WORLD_SIZE +): + """ + Wrapper to use with a test method + """ + if func is None: + return partial( + spawn_threads_and_init_comms, 
timeout=timeout, world_size=world_size + ) + + + def _run_test_method_with_multi_threads(world_size, callback): + world = _install_threaded_pg() + global_store = c10d.HashStore() + + def world_is_valid(): + return world == c10d.distributed_c10d._world + + def worker(rank, world_pg, store): + c10d.init_process_group( + backend="threaded", rank=rank, world_size=world_size, store=store + ) + try: + callback() + except BaseException as ex: + # Exceptions are handled in MultiThreadedTestCase + MultiThreadedTestCase.exception_queue.put((rank, sys.exc_info())) + ProcessLocalGroup.exception_handle(ex) # trigger _terminate event and awaken worker threads + finally: + if world_is_valid(): + c10d.destroy_process_group() + + threads = [] + for rank in range(world_size): + t = threading.Thread(target=worker, args=(rank, world, global_store)) + t.start() + threads.append(t) + + return threads + + + @wraps(func) + def wrapper(self, *args, **kwargs): + # TODO: get test name from kwargs + torch._C._distributed_c10d._set_thread_isolation_mode(True) + try: + threads = _run_test_method_with_multi_threads(world_size, lambda: func(self, *args, **kwargs)) + # join and error handling + MultiThreadedTestCase._join_threads(threads, func) + finally: + torch._C._distributed_c10d._set_thread_isolation_mode(False) + + return wrapper + + +class MultiThreadedTestCase(TestCase): + """ + Test runner that runs all tests with the in-proc process group using + multiple threads with the threaded process group. + + Each test spawns world_size threads and run the test method in each thread. + + Difference from regular MultiProcess test runner: + Must explicitly defines SetUp and call self._spawn_threads() to run the tests. + Cannot use setUp / tearDown (must use perThreadSetup / perThreadShutdown) + to set up / tear down each thread when running each test. + No global state possible + How bad of a limitation is this? 
+ """ + exception_queue = queue.Queue() + + MAIN_THREAD_RANK = -1 + + def join_or_run(self, fn): + @wraps(fn) + def wrapper(self): + if self.rank == self.MAIN_THREAD_RANK: + self._join_threads(self.threads, fn) + else: + fn() + + return types.MethodType(wrapper, self) + + def __init__(self, method_name: str = "runTest", methodName: str = "runTest") -> None: + # methodName is the correct naming in unittest and testslide uses keyword arguments. + # So we need to use both to 1) not break BC and, 2) support testslide. + if methodName != "runTest": + method_name = methodName + super().__init__(method_name) + fn = getattr(self, method_name) + setattr(self, method_name, self.join_or_run(fn)) + + def perThreadSetUp(self): + # super().setUp() # TestCase.setUp() calls torch.manual_seed() + pass + + def perThreadTearDown(self): + pass + + def setUp(self) -> None: + """ + setUp only set up things in the main thread, if you want to configure things + in the spawned threads, use perThreadSetUp + """ + super().setUp() + self.rank = self.MAIN_THREAD_RANK + self.threads = [] + # Show full C++ stacktraces when a Python error originating from C++ is raised. 
+ os.environ["TORCH_SHOW_CPP_STACKTRACES"] = "1" + + def tearDown(self): + """ + tearDown only set up things in the main thread, if you want to configure things + in the spawned threads, use perThreadTearDown + """ + super().tearDown() + self.threads = [] + + def _spawn_threads(self): + """ + class method to spawn threads and run test, use this method in the SetUp of your TestCase + """ + torch._C._distributed_c10d._set_thread_isolation_mode(True) + test_name = self._current_test_name + # for each test case, we need to create thread local world, and a global store + world = _install_threaded_pg() + self.__class__.global_store = c10d.HashStore() + + def world_is_valid(): + return world == c10d.distributed_c10d._world + + if not world_is_valid(): + raise RuntimeError("Invalid world") + + for rank in range(self.world_size): + t = threading.Thread(target=self.__class__._run, args=(test_name, rank, self.world_size)) + t.start() + self.threads.append(t) + + @classmethod + def _run(cls, test_name, rank, world_size, **kwargs): + self = cls(test_name) + self.rank = rank + + # precision/rel_tol is a thread-local setting since it may be overridden per test, need to make + # every thread have the same value. This would be relevant when we use op db tests, where it + # needs those states to be set i.e. using instantiate_device_type_tests() + # TODO: figure out a better way to do this + if hasattr(self, "_tls"): + self._tls = threading.local() + self._tls.precision = TestCase._precision + self._tls.rel_tol = TestCase._rel_tol + + self.run_test_with_threaded_pg(test_name, rank, world_size) + + def run_test_with_threaded_pg(self, test_name, rank, world_size): + """ + Run the current test associated with `test_name` using the threaded process group. 
+ """ + c10d.init_process_group( + backend="threaded", rank=rank, world_size=world_size, store=self.__class__.global_store + ) + self.perThreadSetUp() + + try: + getattr(self, test_name)() + except BaseException as ex: + self.exception_queue.put((rank, sys.exc_info())) + ProcessLocalGroup.exception_handle(ex) # trigger _terminate event and awaken worker threads + finally: + c10d.destroy_process_group() + self.perThreadTearDown() + + + @classmethod + def _join_threads(cls, threads, fn): + timeout = TIMEOUT_DEFAULT + try: + for idx, thread in enumerate(threads): + thread.join(max(0, timeout)) + if thread.is_alive(): + MultiThreadedTestCase.exception_queue.put( + ( + idx, + ( + TimeoutError, + TimeoutError( + f"Rank failed to join in under {timeout} seconds" + ), + None, + ), + ) + ) + ProcessLocalGroup.reset() + failed_ranks = [] + while not cls.exception_queue.empty(): + failure = cls.exception_queue.get() + failed_ranks.append(failure) + finally: + _uninstall_threaded_pg() + torch._C._distributed_c10d._set_thread_isolation_mode(False) + + cls._check_return_codes(failed_ranks, timeout, fn) + + @classmethod + def _check_return_codes(cls, failed_ranks, timeout, fn): + # Print based on exceptions raised from threads + # SkipTest: print info for each thread + # TimeoutError: raise RuntimeError for any timed out thread + # Normal Exception: print error for each thread that raises exception + # and raise a RuntimeError + error_msg = "" + skip_code = -1 + for rank, exc_info in failed_ranks: + exc = exc_info[1] + if isinstance(exc, unittest.SkipTest): + logger.info( + "Thread %s skipping test %s for following reason: %s", rank, fn, str(exc) + ) + if skip_code < 0: + skip_code = TEST_SKIPS["generic"].exit_code + elif isinstance(exc, TimeoutError): + msg = f"Thread {rank} terminated or timed out after {timeout} seconds\n" + logger.error(msg) + raise RuntimeError(msg) + elif isinstance(exc, Exception): + msg = "".join(traceback.format_exception(*exc_info)) + logger.error( + 
"Caught exception: \n%s exiting thread %s", msg, rank + ) + error_msg += ( + f"Thread {rank} exited with exception:\n{msg}\n" + ) + elif isinstance(exc, SystemExit): + if type(exc.code) == int and skip_code < 0: + skip_code = exc.code + + # check exceptions + if len(error_msg) > 0: + raise RuntimeError(error_msg) + # check skip + if skip_code > 0: + for skip in TEST_SKIPS.values(): + if skip_code == skip.exit_code: + if IS_SANDCASTLE: + # "pass" the test with an appropriate message. + logger.info( + "Skipping %s on sandcastle for the following reason: %s", fn, skip.message + ) + return + else: + raise unittest.SkipTest(skip.message) + + @property + def world_size(self) -> int: + return DEFAULT_WORLD_SIZE + + @property + def _current_test_name(self) -> str: + # self.id() == e.g. '__main__.TestDistributed.TestAdditive.test_get_rank' + return self.id().split(".")[-1] + + def assertEqualOnRank(self, x, y, msg=None, *, rank=0): + """ + The reason why we have this util function instead of + self.assertEqual is all threads are sharing one CPU RNG + so the assertion result is only reliable on rank 0 + """ + if self.rank == rank: + self.assertEqual(x, y, msg) + + def assertNotEqualOnRank(self, x, y, msg=None, *, rank=0): + if self.rank == rank: + self.assertNotEqual(x, y) + + +class SaveForwardInputsModule(nn.Module): + def __init__( + self, + forward_inputs: Dict[nn.Module, torch.Tensor], + cast_forward_inputs: bool, + ) -> None: + super().__init__() + self.l = nn.Linear(100, 100) + self.forward_inputs = forward_inputs + self.cast_forward_inputs = cast_forward_inputs + + def forward(self, x: torch.Tensor) -> torch.Tensor: + self.forward_inputs[self] = x + return self.l(x.to(self.l.weight.dtype) if self.cast_forward_inputs else x) + + +class SaveForwardInputsModel(nn.Module): + def __init__( + self, + forward_inputs: Dict[nn.Module, torch.Tensor], + cast_forward_inputs: bool, + ) -> None: + super().__init__() + self.c1 = SaveForwardInputsModule(forward_inputs, 
cast_forward_inputs) + self.c2 = SaveForwardInputsModule(forward_inputs, cast_forward_inputs) + self.forward_inputs = forward_inputs + + def forward(self, x: torch.Tensor) -> torch.Tensor: + self.forward_inputs[self] = x + return self.c2(self.c1(x)) + +@contextmanager +def _dynamo_dist_per_rank_init(rank, world_size, init_pg=True, fake_pg=False): + # To avoid multiple inheritance from _dynamo.test_case.TestCase and MultiProcessTestCase, + # Just manually implement the most important part of the dynamo behavior to reset/clear. + if not fake_pg: + torch.cuda.set_device(rank) + os.environ['MASTER_ADDR'] = 'localhost' + os.environ['MASTER_PORT'] = '6789' + if init_pg: + if fake_pg: + store = torch.testing._internal.distributed.fake_pg.FakeStore() + c10d.init_process_group( + backend="fake", + world_size=world_size, + rank=rank, + store=store, + ) + else: + c10d.init_process_group("nccl", rank=rank, world_size=world_size) + torch._dynamo.reset() + torch._dynamo.utils.counters.clear() + try: + yield + finally: + torch._dynamo.reset() + torch._dynamo.utils.counters.clear() + if init_pg: + c10d.destroy_process_group() + + +class DynamoDistributedSingleProcTestCase(torch._dynamo.test_case.TestCase): + """ + Test harness for single-process dynamo distributed tests, + initializes dist process group. + + Prefer this for simple tests, as it's easier to debug. 
+ """ + + @classmethod + def setUpClass(cls): + super().setUpClass() + # _exit_stack is set up in TestCase + cls._exit_stack.enter_context( + patch.dict( + os.environ, + { + "MASTER_ADDR": "localhost", + "MASTER_PORT": "12355", + }, + ) + ) + cls.rank = 0 + cls.device = f"cuda:{cls.rank}" + cls.device_ids = None if "cuda" in cls.device else [cls.rank] + c10d.init_process_group("nccl", rank=cls.rank, world_size=1) + + @classmethod + def tearDownClass(cls): + c10d.destroy_process_group() + super().tearDownClass() + + +class DynamoDistributedMultiProcTestCase(MultiProcessTestCase): + """ + Use this for tests that actually run on multiple GPUs. + + Decorate tests with @skip_if_lt_x_gpu(ngpu) + + Note: MultiProcTestCase spawns processes per test and is slow. + Prefer MultiThreadedTestCase for most tests. Perhaps use this one + sparingly for integration tests. + """ + def setUp(self): + super().setUp() + self._spawn_processes() + + def tearDown(self): + super().tearDown() + try: + os.remove(self.file_name) + except OSError: + pass + + @property + def world_size(self) -> int: + return torch.cuda.device_count() + + @classmethod + def _run(cls, rank: int, test_name: str, file_name: str, parent_pipe, **kwargs) -> None: + # The rest is copypasta from MultiProcessTestCase._run + self = cls(test_name) + self.rank = rank + self.file_name = file_name + self.run_test(test_name, parent_pipe) + + +class MultiProcContinousTest(TestCase): + # Class variables: + # number of test processes + world_size: int = 2 + # rank of the current process + rank: int = -1 # unset state + # Rendezvous file + rdvz_file: Optional[str] = None + + @classmethod + @abc.abstractmethod + def backend_str(cls) -> str: + """ + ProcessGroup backend str. + To be customized by sub test classes, e.g. "nccl". + Here we raise error. + """ + raise NotImplementedError("Please implement backend_str in your test class") + + @classmethod + def opts(cls, high_priority_stream=False): + """ + ProcessGroup init options. 
+ To be customized by sub test classes, e.g. ProcessGroupNCCLOpTest + Here we return None. + """ + return None + + @classmethod + def setUpClass(cls): + """ + Class-scope test fixture. Run once for entire test class, before any test starts. + Set up the process group. + """ + super().setUpClass() + if not 0 <= cls.rank < cls.world_size: + raise RuntimeError( + "Rank must be set and in the range of 0 to world_size. " + f"World size: {cls.world_size} Rank: {cls.rank}" + ) + if cls.rdvz_file: + store = c10d.FileStore(cls.rdvz_file, cls.world_size) + else: + # torchrun takes care of rendezvous + store = None + opts = cls.opts() + backend = cls.backend_str() + print(f"Testing {backend=}") + # create nccl processgroup with opts + c10d.init_process_group( + backend=backend, + world_size=cls.world_size, + rank=cls.rank, + store=store, + pg_options=opts, + ) + cls.pg = c10d.distributed_c10d._get_default_group() + print(f"Rank {cls.rank} setup complete") + + @classmethod + def tearDownClass(cls): + """ + Class-scope test fixture. Run once for entire test class, after all tests finish. + Tear down the process group. + """ + c10d.destroy_process_group() + super().tearDownClass() + # Clear up the rendezvous file + if cls.rdvz_file: + try: + os.remove(cls.rdvz_file) + except OSError: + pass + print(f"Rank {cls.rank} teardown complete") + + @classmethod + def run_rank( + cls, + rank: int, + world_size: int, + rdvz_file: Optional[str] = None, + ): + """ + This is an entry point for each rank to run the tests in `MultiProcContinousTest`. + In this entry point, we set the class variables for the test class. + Then we run all tests. + + Note: + - This helper only works for a subclass of `MultiProcContinousTest`. + + Example: + - See `test_c10d_ops_nccl.py`. 
+ """ + # set class variables for the test class + cls.rank = rank + cls.world_size = world_size + cls.rdvz_file = rdvz_file + # Launch tests via `common_utils` infra + run_tests() diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/common_fsdp.py b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/common_fsdp.py new file mode 100644 index 0000000000000000000000000000000000000000..f9eff69767931c1c50a36bb12819f9304c295740 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/common_fsdp.py @@ -0,0 +1,1532 @@ +# mypy: allow-untyped-defs +# Owner(s): ["oncall: distributed"] + +import contextlib +import os +import re +import sys +import warnings +from abc import ABC, abstractmethod +from contextlib import nullcontext +from copy import deepcopy +from enum import auto, Enum +from functools import wraps +from typing import ( + Any, + Callable, + Dict, + List, + no_type_check, + Optional, + Tuple, + Type, + Union, +) +from unittest import mock + +import torch +import torch.distributed as dist +import torch.nn as nn +import torch.nn.functional as F +from torch.distributed._composable import checkpoint +from torch.distributed._composable.fsdp import fully_shard +from torch.distributed._composable.fsdp._fsdp_param_group import ( + FSDPParamGroup, + RegisterPostBackwardFunction, +) +from torch.distributed.device_mesh import DeviceMesh +from torch.distributed.fsdp import CPUOffload, FullyShardedDataParallel as FSDP +from torch.distributed.fsdp._common_utils import TrainingState +from torch.distributed.fsdp._init_utils import NO_RESHARD_AFTER_FORWARD_STRATEGIES +from torch.distributed.fsdp.fully_sharded_data_parallel import ( + BackwardPrefetch, + MixedPrecision, + ShardingStrategy, +) +from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler +from torch.distributed.fsdp.wrap import always_wrap_policy, ModuleWrapPolicy, wrap +from torch.distributed.tensor import distribute_tensor, DTensor, 
Shard +from torch.distributed.tensor.parallel import ( + ColwiseParallel, + parallelize_module, + RowwiseParallel, + SequenceParallel, +) +from torch.nn import TransformerDecoderLayer, TransformerEncoderLayer +from torch.nn.parallel.distributed import DistributedDataParallel as DDP +from torch.testing._internal.common_distributed import ( + MultiProcessTestCase, + MultiThreadedTestCase, + run_subtests, + TEST_SKIPS, +) +from torch.testing._internal.common_utils import FILE_SCHEMA, get_cycles_per_ms +from torch.utils._triton import has_triton + + +class FSDPInitMode(Enum): + # No FSDP wrapping + NO_FSDP = auto() + # FSDP recursive wrapping + RECURSIVE = auto() + # TODO: FSDP non-recursive wrapping + # NONRECURSIVE = auto() + + +class CUDAInitMode(Enum): + # Move model to CUDA before passing to the FSDP constructor + CUDA_BEFORE = auto() + # Move model to CUDA after passing to the FSDP constructor + CUDA_AFTER = auto() + # Keep on CPU + CUDA_NEVER = auto() + + +class FSDPTestModel(nn.Module, ABC): + """This defines the interface expected from all models used commonly for + FSDP unit tests.""" + + @abstractmethod + def get_input(self, device) -> Tuple[torch.Tensor, ...]: + """Returns an input for the model as as tuple.""" + ... + + @abstractmethod + def get_loss(self, input, output) -> torch.Tensor: + """Returns the loss given the input and output.""" + ... + + @abstractmethod + def run_backward(self, loss) -> None: + """Runs the backward pass (e.g. including ``loss.backward()``).""" + ... + + @staticmethod + @abstractmethod + def init(*args: Any, **kwargs: Any) -> nn.Module: + """Initializes an instance of this model.""" + ... + + +def _assert_module_states( + model: nn.Module, + process_group: dist.ProcessGroup, + assert_fn: Callable, +): + """ + All-gathers module states across ranks and calls ``assert_fn`` on each pair + of corresponding states from rank 0 and a nonzero rank. 
For example, if + ``assert_fn`` is ``self.assertEqual()``, then this checks that all module + states are equal across ranks. + """ + # Include names for debugging convenience + named_module_states = [ + (param_name, param.detach().cpu()) + for param_name, param in model.named_parameters() + ] + named_module_states += [ + (buffer_name, buffer.detach().cpu()) + for buffer_name, buffer in model.named_buffers() + ] + world_size = dist.get_world_size(process_group) + olist = [None for _ in range(world_size)] + dist.all_gather_object(olist, named_module_states, group=process_group) + rank0_states = olist[0] + assert rank0_states is not None # mypy + for state in olist[1:]: + assert state is not None # mypy + for (_, p1), (_, p2) in zip(rank0_states, state): + assert_fn(p1, p2) + + +def _zero_model( + model: nn.Module, + zero_buffers: bool = False, + summon_full=True, +): + """Zeros the parameters and optionally buffers of ``model`` in place.""" + ctx = FSDP.summon_full_params(model) if summon_full else nullcontext() + with ctx: + for param in model.parameters(): + with torch.no_grad(): + param.zero_() + if zero_buffers: + for buffer in model.buffers(): + with torch.no_grad(): + buffer.zero_() + + +def _get_state_dict(model, cpu_offload=False, half=False): + if not cpu_offload: + model = model.cuda() + if half: + model.half() + + return model.state_dict() + + +def subtest_name(test_name_mapping, *args): + return "_".join( + [test_name_mapping[str(s)] if s is not None else "none" for s in args] + ) + + +def _broadcast_state_dict(rank, state_dict): + # For non-FSDP roots, some parts of the model state on rank 0 may + # not be on CPU, so we move everything to CPU to avoid issues like: + # https://github.com/pytorch/pytorch/issues/77113. 
+ for param_name, param in state_dict.items(): + if param.device != torch.device("cpu"): + state_dict[param_name] = param.cpu() + + olist = [state_dict if rank == 0 else None] + dist.broadcast_object_list(olist) + state_dict = olist[0] + # Ensure that the state is on CUDA + for param_name in state_dict.keys(): + state_dict[param_name] = state_dict[param_name].cuda() + return state_dict + + +def get_full_params(model: nn.Module, recurse: bool = True): + """ + Returns the full unsharded parameters of ``model``. Any FSDP-managed + parameters offloaded to CPU are moved to GPU in the returned list. + + Args: + recurse (bool): If ``False``, only unshards the parameters immediate to + ``model``; if ``True``, recurses through the module hierarchy + rooted at ``model``. + """ + with FSDP.summon_full_params(model, recurse=recurse): + return deepcopy(list(model.parameters())) + + +def _maybe_cuda(model: nn.Module, move_to_cuda: bool): + return model.cuda() if move_to_cuda else model + + +def _maybe_wrap_fsdp(model: nn.Module, wrap_fsdp: bool, *args, **kwargs): + return model if not wrap_fsdp else FSDP(model, *args, **kwargs) + + +class DummyProcessGroup: + def __init__(self, rank: int, size: int): + self._rank = rank + self._size = size + + def rank(self) -> int: + return self._rank + + def size(self) -> int: + return self._size + + def allreduce(self, *args, **kwargs): + dist_wait = mock.Mock() + + def get_future(): + future: torch.futures.Future = torch.futures.Future() + future.set_result(1) + return future + + dist_wait.get_future = get_future + return dist_wait + + +class TransformerWithSharedParams(FSDPTestModel): + def __init__( + self, + group: dist.ProcessGroup, + cuda_init_mode: CUDAInitMode, + add_bn: bool, + deterministic: bool, + ): + super().__init__() + self.rank = group.rank() + self.world_size = group.size() + if deterministic: + torch.manual_seed(0) + d_vocab = 23 + d_model = 16 + + self.embed_tokens = nn.Embedding(d_vocab, d_model) + self.transformer = 
nn.Transformer( + d_model=d_model, + num_encoder_layers=2, + num_decoder_layers=2, + dim_feedforward=8, + dropout=0.1, + ) + self.output_proj = nn.Linear(d_model, d_vocab) + + # share the embedding and output projection weights + self.output_proj.weight = self.embed_tokens.weight + self.register_buffer( + "vocab_bias", self.embed_tokens.weight.new_ones((d_model,)) + ) + self.register_buffer( + "long_buffer", + torch.zeros_like(self.vocab_bias, dtype=torch.long), + ) # type: ignore[arg-type] + + self.bs = 2 + self.bn = torch.nn.BatchNorm1d(self.bs) if add_bn else torch.nn.Identity() + if cuda_init_mode == CUDAInitMode.CUDA_BEFORE: + self = self.cuda() + if deterministic: + self.eval() + + def get_input(self, device): + torch.manual_seed(1 + self.rank) # keep everything deterministic + src = torch.arange(12, device=device).view(6, self.bs) # T x B + tgt = torch.arange(self.bs * 4, device=device).view(4, self.bs) # T x B + return (src, tgt) + + def forward(self, src_ids, tgt_ids): + src = self.embed_tokens(src_ids) + src = src + self.vocab_bias + self.long_buffer.type_as(src) # type: ignore[operator] + tgt = self.embed_tokens(tgt_ids) + tgt = self.bn(tgt) + x = self.transformer(src, tgt) + return self.output_proj(x) + + def get_loss(self, input, output): + _, tgt = input + return nn.functional.cross_entropy( + output.view(-1, output.size(-1)), tgt.view(-1), reduction="sum" + ) + + def run_backward(self, loss): + loss.backward() + + @staticmethod + def init( + group: dist.ProcessGroup, + fsdp_init_mode: FSDPInitMode, + cuda_init_mode: CUDAInitMode, + fsdp_kwargs: Optional[Dict[str, Any]] = None, + deterministic: bool = False, + add_bn: bool = True, + ) -> Union[nn.Module, FSDP]: + """ + Initializes a :class:`TransformerWithSharedParams` instance. + + Args: + fsdp_init_mode (FSDPInitMode): If ``NO_FSDP``, then does not wrap + any modules with FSDP. If ``RECURSIVE``, then wraps with + top-level FSDP. 
By default, the top-level FSDP uses the + ``ModuleWrapPolicy`` for encoder and decoder layers, but a + different auto wrap policy may be specified via + ``fsdp_kwargs``. + cuda_init_mode (CUDAInitMode): Determines model movement to CUDA. + fsdp_kwargs (Optional[Dict[str, Any]]): Optional keyword arguments + forwarded to the FSDP constructor. + deterministic (bool): Whether to make the model deterministic + across constructions. + add_bn (bool): Whether to include batch norm in the model. + """ + + if fsdp_kwargs is None: + fsdp_kwargs = {} + if fsdp_init_mode == FSDPInitMode.NO_FSDP: + if isinstance(group, tuple): + pg = group[0] + else: + pg = group + return TransformerWithSharedParams( + pg, cuda_init_mode, add_bn, deterministic + ) + elif fsdp_init_mode == FSDPInitMode.RECURSIVE: + # Default to the `ModuleWrapPolicy` + if "auto_wrap_policy" not in fsdp_kwargs: + auto_wrap_policy = ModuleWrapPolicy( + { + TransformerEncoderLayer, + TransformerDecoderLayer, + } + ) + else: + auto_wrap_policy = fsdp_kwargs.pop("auto_wrap_policy") + + if ( + "sharding_strategy" in fsdp_kwargs + and fsdp_kwargs["sharding_strategy"] + in {ShardingStrategy.HYBRID_SHARD, ShardingStrategy._HYBRID_SHARD_ZERO2} + and not isinstance(group, tuple) + ): + fsdp_pg = None + else: + fsdp_pg = group + + if isinstance(group, tuple): + tformer_pg = group[0] + else: + tformer_pg = group + + m = TransformerWithSharedParams( + tformer_pg, cuda_init_mode, add_bn, deterministic + ) + fsdp_model = FSDP( + m, + fsdp_pg, + auto_wrap_policy=auto_wrap_policy, + **fsdp_kwargs, + ) + if cuda_init_mode == CUDAInitMode.CUDA_AFTER: + fsdp_model = fsdp_model.cuda() + return fsdp_model + raise ValueError(f"Unsupported FSDP init mode: {fsdp_init_mode}") + + def get_ignored_modules(self): + return [self.transformer] + + +class NestedWrappedModule(FSDPTestModel): + def __init__( + self, + group: dist.ProcessGroup, + wrap_fsdp: bool, + cuda_init_mode: CUDAInitMode, + deterministic: bool, + **fsdp_kwargs, + ): + 
super().__init__() + self.rank = group.rank() + self.world_size = group.size() + move_to_cuda = cuda_init_mode == CUDAInitMode.CUDA_BEFORE + + def _maybe_wrap(layer): + if wrap_fsdp: + return FSDP(layer, group, **fsdp_kwargs) + return layer + + if deterministic: + torch.manual_seed(0) + self.module = nn.Sequential( + _maybe_cuda(nn.Linear(8, 4), move_to_cuda), + _maybe_wrap( + nn.Sequential( + _maybe_wrap(_maybe_cuda(nn.Linear(4, 16), move_to_cuda)), + _maybe_cuda(nn.Linear(16, 16), move_to_cuda), + ), + ), + _maybe_wrap(_maybe_cuda(nn.Linear(16, 4), move_to_cuda)), + _maybe_cuda(nn.Linear(4, 8), move_to_cuda), + ) + + def get_input(self, device): + torch.manual_seed(1 + self.rank) # keep everything deterministic + return (torch.rand(4, 8, device=device),) + + def forward(self, x): + return self.module(x) + + def get_loss(self, input, output): + loss = output.sum() + return loss + + def run_backward(self, loss): + loss.backward() + + @staticmethod + def init( + group: dist.ProcessGroup, + fsdp_init_mode: FSDPInitMode, + cuda_init_mode: CUDAInitMode, + fsdp_kwargs: Optional[Dict[str, Any]] = None, + deterministic: bool = False, + ) -> nn.Module: + """ + Initializes a :class:`NestedWrappedModule` instance. + + Args: + fsdp_init_mode (FSDPInitMode): If ``NO_FSDP``, then does not wrap + any modules with FSDP. If ``RECURSIVE``, then wraps some nested + modules with FSDP but not the top-level module. The model may + later be wrapped with a top-level FSDP external to this method + if desired. + cuda_init_mode (CUDAInitMode): Determines model movement to CUDA. + fsdp_kwargs (Optional[Dict[str, Any]]): Optional keyword arguments + forwarded to the FSDP constructor. + deterministic (bool): Whether to make the model deterministic + across constructions. 
+ """ + if fsdp_kwargs is None: + fsdp_kwargs = {} + if fsdp_init_mode == FSDPInitMode.NO_FSDP: + return NestedWrappedModule( + group, + wrap_fsdp=False, + cuda_init_mode=cuda_init_mode, + deterministic=deterministic, + ) + elif fsdp_init_mode == FSDPInitMode.RECURSIVE: + # Does not wrap with top-level FSDP + fsdp_model = NestedWrappedModule( + group, + wrap_fsdp=True, + cuda_init_mode=cuda_init_mode, + deterministic=deterministic, + **fsdp_kwargs, + ) + if cuda_init_mode == CUDAInitMode.CUDA_AFTER: + fsdp_model = fsdp_model.cuda() + return fsdp_model + raise ValueError(f"Unsupported FSDP init mode: {fsdp_init_mode}") + + +class AlwaysWrapNestedWrappedModule(NestedWrappedModule): + @staticmethod + def init( + group: dist.ProcessGroup, + fsdp_init_mode: FSDPInitMode, + cuda_init_mode: CUDAInitMode, + fsdp_kwargs: Optional[Dict[str, Any]] = None, + deterministic: bool = False, + ): + """ + Initializes a :class:`NestedWrappedModule` instance, but unlike + :meth:`NestedWrappedModule.init`, for the ``RECURSIVE`` init mode, this + wraps with top-level FSDP and the ``always_wrap_policy()`` auto wrap + policy. 
+ """ + model = super( + AlwaysWrapNestedWrappedModule, AlwaysWrapNestedWrappedModule + ).init( + group=group, + fsdp_init_mode=FSDPInitMode.NO_FSDP, + cuda_init_mode=cuda_init_mode, + fsdp_kwargs=fsdp_kwargs, + deterministic=deterministic, + ) + if fsdp_init_mode == FSDPInitMode.NO_FSDP: + return model + elif fsdp_init_mode == FSDPInitMode.RECURSIVE: + fsdp_kwargs = fsdp_kwargs or {} + fsdp_model = FSDP(model, auto_wrap_policy=always_wrap_policy, **fsdp_kwargs) + if cuda_init_mode == CUDAInitMode.CUDA_AFTER: + fsdp_model = fsdp_model.cuda() + return fsdp_model + + +class NonUniformReqGradNWM(NestedWrappedModule): + def __init__( + self, + group: dist.ProcessGroup, + wrap_fsdp: bool, + cuda_init_mode: CUDAInitMode, + deterministic: bool, + **fsdp_kwargs, + ): + super(NestedWrappedModule, self).__init__() + # This `__init__` only differs from `NestedWrappedModule.__init__` in that + # the last two `nn.Linear` layers are FSDP wrapped in a `nn.Sequential` + # container. This arrangement results in all elements of the last two parameters + # residing on a single rank. Freezing all parameters except those two allows us + # to verify that `ShardedGradScaler` accommodates situations where some ranks + # have no (non-zero sized) parameter shards. 
+ self.rank = group.rank() + self.world_size = group.size() + move_to_cuda = cuda_init_mode == CUDAInitMode.CUDA_BEFORE + + def _maybe_wrap(layer): + if wrap_fsdp: + return FSDP(layer, group, **fsdp_kwargs) + return layer + + if deterministic: + torch.manual_seed(0) + self.module = nn.Sequential( + _maybe_cuda(nn.Linear(8, 4), move_to_cuda), + _maybe_wrap( + nn.Sequential( + _maybe_wrap(_maybe_cuda(nn.Linear(4, 16), move_to_cuda)), + _maybe_cuda(nn.Linear(16, 16), move_to_cuda), + ), + ), + _maybe_wrap( + nn.Sequential( + _maybe_cuda(nn.Linear(16, 4), move_to_cuda), + _maybe_cuda(nn.Linear(4, 8), move_to_cuda), + ), + ), + ) + + @staticmethod + def _set_nonuniform_req_grad(model, req_grad_mask) -> None: + for n, p in model.named_parameters(): + if not re.match(req_grad_mask, n): + p.requires_grad_(False) + + @staticmethod + def init( + group: dist.ProcessGroup, + fsdp_init_mode: FSDPInitMode, + cuda_init_mode: CUDAInitMode, + fsdp_kwargs: Optional[Dict[str, Any]] = None, + deterministic: bool = False, + ): + """ + Initializes a :class:`NestedWrappedModule` instance, but unlike + :meth:`NestedWrappedModule.init`, it wraps a second :class:`torch.nn.Sequential` + container to enable the desired non-uniform ``requires_grad`` + ``use_orig_params=True`` tests. For both ``RECURSIVE`` and ``NO_FSDP`` + init modes, freezes all parameters except the last two to validate + ``ShardedGradScaler`` support for ranks with no (non-zero sized) local shards in + FSDP ``use_orig_params=True`` mode. + """ + # The parameters that should remain unfrozen are in `module.2.1`. The regex + # pattern below matches the relevant parameter names both with and without + # an interstitial FSDP module indicator (`_fsdp_wrapped_module`) present. 
+ req_grad_pattern = re.compile(r"module\.2.*\.1.*") + if fsdp_init_mode == FSDPInitMode.NO_FSDP: + ddp_model = NonUniformReqGradNWM( + group, + wrap_fsdp=False, + cuda_init_mode=cuda_init_mode, + deterministic=deterministic, + ) + NonUniformReqGradNWM._set_nonuniform_req_grad(ddp_model, req_grad_pattern) + return ddp_model + elif fsdp_init_mode == FSDPInitMode.RECURSIVE: + if fsdp_kwargs is None: + fsdp_kwargs = {} + fsdp_model = NonUniformReqGradNWM( + group, + wrap_fsdp=True, + cuda_init_mode=cuda_init_mode, + deterministic=deterministic, + **fsdp_kwargs, + ) + if cuda_init_mode == CUDAInitMode.CUDA_AFTER: + fsdp_model = fsdp_model.cuda() + NonUniformReqGradNWM._set_nonuniform_req_grad(fsdp_model, req_grad_pattern) + return fsdp_model + raise ValueError(f"Unsupported FSDP init mode: {fsdp_init_mode}") + + +class ModuleWithDelay(FSDPTestModel): + """This class wraps a :class:`FSDPTestModel` to optionally add a delay + after computing the loss and/or before the gradient reduction.""" + + def __init__( + self, + module: nn.Module, + delay_after_loss_ms: int, + delay_before_reduction_ms: int, + ): + super().__init__() + self.delay_after_loss_ms = delay_after_loss_ms + self.delay_before_reduction_ms = delay_before_reduction_ms + self.module = module + + def get_input(self, device): + return self.module.get_input(device) + + def forward(self, x): + return self.module(x) + + def get_loss(self, input, output): + loss = self.module.get_loss(input, output) + if self.delay_after_loss_ms > 0: + torch.cuda._sleep(int(self.delay_after_loss_ms * get_cycles_per_ms())) + return loss + + def run_backward(self, loss): + orig_reduce_scatter = torch.distributed.reduce_scatter_tensor + + def _delayed_reduce_scatter(*args, **kwargs): + if self.delay_before_reduction_ms > 0: + torch.cuda._sleep( + int(self.delay_before_reduction_ms * get_cycles_per_ms()) + ) + return orig_reduce_scatter(*args, **kwargs) + + with mock.patch( + "torch.distributed.reduce_scatter_tensor", 
_delayed_reduce_scatter + ): + self.module.run_backward(loss) + + @staticmethod + def init( + module_class: Type[FSDPTestModel], + *model_args: Any, + delay_after_loss_ms: int, + delay_before_reduction_ms: int, + **model_kwargs: Any, + ): + """ + Args: + module_class (Type[FSDPTestModel]): Wrapped module class to which + to add delays. + model_args: Positional arguments forwarded to the ``module_class`` + ``init()``. + delay_after_loss_ms (int): Delay after computing the loss/before + the optimizer step (in ms). + delay_before_reduction_ms (int): Delay before reduce-scattering + gradients (in ms). + model_kwargs: Keyword arguments forwarded to the ``module_class`` + ``init()``. + """ + return ModuleWithDelay( + module_class.init(*model_args, **model_kwargs), + delay_after_loss_ms, + delay_before_reduction_ms, + ) + + +class NestedWrappedModuleWithDelay(ModuleWithDelay): + @staticmethod + def init( # type: ignore[override] + group: dist.ProcessGroup, + fsdp_init_mode: FSDPInitMode, + cuda_init_mode: CUDAInitMode = CUDAInitMode.CUDA_AFTER, + fsdp_kwargs: Optional[Dict[str, Any]] = None, + deterministic: bool = False, + delay_after_loss_ms: int = 0, + delay_before_reduction_ms: int = 0, + ): + return ModuleWithDelay.init( + NestedWrappedModule, + group=group, + fsdp_init_mode=fsdp_init_mode, + cuda_init_mode=cuda_init_mode, + fsdp_kwargs=fsdp_kwargs, + deterministic=deterministic, + delay_after_loss_ms=delay_after_loss_ms, + delay_before_reduction_ms=delay_before_reduction_ms, + ) + + +class DummyDDP(nn.Module): + def __init__(self, module): + super().__init__() + self.module = module + + def forward(self, *args, **kwargs): + return self.module(*args, **kwargs) + + +class MixtureOfExperts(NestedWrappedModule): + def __init__( + self, + group: dist.ProcessGroup, + wrap_fsdp: bool, + cuda_init_mode: CUDAInitMode, + delay_before_free_ms: int, + deterministic: bool, + **fsdp_kwargs, + ): + super().__init__( + group=group, + wrap_fsdp=wrap_fsdp, + 
cuda_init_mode=cuda_init_mode, + deterministic=deterministic, + ) + self.group = group + self.delay_before_free_ms = delay_before_free_ms + self.wrap_fsdp = wrap_fsdp + self.move_to_cuda = cuda_init_mode == CUDAInitMode.CUDA_BEFORE + if deterministic: + # Give each rank different expert parameters + torch.manual_seed(42 + self.rank) + d_expert = 23 + d_shared = 12 + d_input = 8 + expert = _maybe_cuda(nn.Linear(d_expert, d_shared), self.move_to_cuda) + + self.num_expert_params = sum(p.numel() for p in expert.parameters()) + for p in expert.parameters(): + p.expert = True # type: ignore[attr-defined] + + if deterministic: + # Keep all other parameters the same across ranks + torch.manual_seed(0) + + shared = _maybe_cuda(nn.Linear(d_shared, d_expert), self.move_to_cuda) + + if wrap_fsdp: + # we create a process group of size 1 for the expert params + expert_group = torch.distributed.new_group( + [group.rank()] + ) # world size 1 means no shard + expert = FSDP(expert, expert_group, **fsdp_kwargs) # type: ignore[assignment] + shared = FSDP(shared, group, **fsdp_kwargs) # type: ignore[assignment] + + self.module = nn.Sequential( + _maybe_cuda(nn.Linear(d_input, d_shared), self.move_to_cuda), + shared, + expert, + _maybe_cuda(nn.Linear(d_shared, d_input), self.move_to_cuda), + ) + + def forward(self, x): + if self.delay_before_free_ms > 0: + expert = self.module[2] + if isinstance(expert, FSDP): + orig_reshard = torch.distributed.fsdp._runtime_utils._reshard + + def _delayed_reshard(*args, **kwargs): + torch.cuda._sleep( + int(self.delay_before_free_ms * get_cycles_per_ms()) + ) + return orig_reshard(*args, **kwargs) + + # This patch covers any `import torch..._reshard` uses. 
+ with mock.patch( + "torch.distributed.fsdp._runtime_utils._reshard", _delayed_reshard + ): + return self.module(x) + + return self.module(x) + + def run_backward(self, loss): + loss.backward() + # Manually reduce gradients if not wrapped in FullyShardedDataParallel + if not self.wrap_fsdp: + with torch.no_grad(): + for p in self.parameters(): + if hasattr(p, "expert"): + continue # these params don't need grad reduction + if p.grad is not None: + p.grad.div_(self.world_size) + torch.distributed.all_reduce(p.grad, group=self.group) + + @staticmethod + def init( + group: dist.ProcessGroup, + fsdp_init_mode: FSDPInitMode, + cuda_init_mode: CUDAInitMode, + fsdp_kwargs: Optional[Dict[str, Any]] = None, + deterministic: bool = False, + delay_before_free_ms: int = 0, + ): + """ + Initializes a :class:`MixtureOfExperts` instance. + + Args: + fsdp_init_mode (FSDPInitMode): If ``NO_FSDP``, then does not wrap + any modules with FSDP. If ``RECURSIVE``, then wraps some nested + modules with FSDP, including the expert and shared layers, but + not the top-level module. The model may later be wrapped with a + top-level FSDP external to this method if desired. + cuda_init_mode (CUDAInitMode): Determines model movement to CUDA. + fsdp_kwargs (Optional[Dict[str, Any]]): Optional keyword arguments + forwarded to the FSDP constructor. + deterministic (bool): Whether to make the model deterministic + across constructions. + delay_before_free_ms (int): Delay before resharding expert + parameters in the forward pass (in ms). 
+ """ + if fsdp_kwargs is None: + fsdp_kwargs = {} + if fsdp_init_mode == FSDPInitMode.NO_FSDP: + return MixtureOfExperts( + group, + wrap_fsdp=False, + cuda_init_mode=cuda_init_mode, + delay_before_free_ms=delay_before_free_ms, + deterministic=deterministic, + ) + elif fsdp_init_mode == FSDPInitMode.RECURSIVE: + # Does not wrap with top-level FSDP + fsdp_model = MixtureOfExperts( + group, + wrap_fsdp=True, + cuda_init_mode=cuda_init_mode, + delay_before_free_ms=delay_before_free_ms, + deterministic=deterministic, + **fsdp_kwargs, + ) + if cuda_init_mode == CUDAInitMode.CUDA_AFTER: + fsdp_model = fsdp_model.cuda() + return fsdp_model + raise ValueError(f"Unsupported FSDP init mode: {fsdp_init_mode}") + + +class MLP(nn.Module): + def __init__( + self, + dim: int, + device: Optional[torch.device] = None, + *, + bias: bool = True, + with_buffer: bool = False, + dim_multiplier: int = 4, + ): + super().__init__() + self.in_proj = nn.Linear(dim, dim_multiplier * dim, device=device, bias=bias) + self.out_proj = nn.Linear(dim_multiplier * dim, dim, device=device, bias=bias) + if with_buffer: + self.register_buffer("buffer", torch.randn((dim,), device=device)) + else: + self.buffer = None + + def forward(self, x: torch.Tensor) -> torch.Tensor: + z = self.in_proj(x) + z = F.relu(z) + z = self.out_proj(z) + z = F.relu(z) + if self.buffer is not None: + z = z + self.buffer + return z + + def reset_parameters(self): + if self.buffer is not None: + torch.nn.init.normal_(self.buffer) + + +class MLPStack(nn.Sequential): + def __init__(self, mlp_dim: int, *, with_seq_parallel: bool = False): + modules: List[nn.Module] = [ + # Use multiplier of 3 to exercise uneven case + MLP(mlp_dim, dim_multiplier=3), + MLP(mlp_dim), + MLP(mlp_dim, dim_multiplier=3), + ] + if with_seq_parallel: + modules.append(nn.LayerNorm(mlp_dim, bias=False)) + super().__init__(*modules) + self.with_seq_parallel = with_seq_parallel + + def parallelize( + self, + tp_mesh: DeviceMesh, + dp_mesh: DeviceMesh, + 
use_activation_checkpointing: bool, + **fsdp_kwargs, + ) -> "MLPStack": + parallelize_plan = { + # Pass `use_local_output=False` to keep as DTensor to preserve + # uneven activation dims + "0.in_proj": ColwiseParallel(use_local_output=False), + "0.out_proj": RowwiseParallel(use_local_output=False), + "1.in_proj": ColwiseParallel(use_local_output=False), + "1.out_proj": RowwiseParallel(use_local_output=False), + "2.in_proj": ColwiseParallel(use_local_output=False), + "2.out_proj": RowwiseParallel(output_layouts=Shard(1)) + if self.with_seq_parallel + else RowwiseParallel(), + } + if self.with_seq_parallel: + parallelize_plan["3"] = SequenceParallel(sequence_dim=1) + parallelize_module(self, device_mesh=tp_mesh, parallelize_plan=parallelize_plan) + for module in self: + if isinstance(module, nn.LayerNorm): + continue + if use_activation_checkpointing: + checkpoint(module) + fully_shard(module, mesh=dp_mesh, **fsdp_kwargs) + fully_shard(self, mesh=dp_mesh, **fsdp_kwargs) + return self + + +class DoubleLinear(nn.Module): + """ + This can be used for returning multiple outputs from a module + (``use_second_linear=True``) or for having an unused module (``False``). + """ + + def __init__(self, dim: int, use_second_linear: bool = True): + super().__init__() + self.lin1 = nn.Linear(dim, dim) + self.lin2 = nn.Linear(dim, dim) + self.relu = nn.ReLU() + self.use_second_linear = use_second_linear + + def forward( + self, x: torch.Tensor + ) -> Union[Tuple[torch.Tensor, torch.Tensor], torch.Tensor]: + if self.use_second_linear: + return self.relu(self.lin1(x)), self.relu(self.lin2(x)) + return self.relu(self.lin1(x)) + + +# NOTE: For these patch methods, if we want safety under multi-threading (e.g. 
+# when using multi-threaded process group), then we want: +# (1) a barrier immediately after reading the original value to ensure that all +# threads see the same original value +# (2) a barrier immediately before restoring the original value to ensure that +# all threads use the patched value inside the context +@contextlib.contextmanager +def patch_all_gather(new_all_gather_into_tensor: Callable): + orig_all_gather = dist.all_gather_into_tensor + dist.barrier() + dist.all_gather_into_tensor = new_all_gather_into_tensor + try: + yield + finally: + dist.barrier() + dist.all_gather_into_tensor = orig_all_gather + + +@contextlib.contextmanager +def patch_reduce_scatter(new_reduce_scatter_tensor: Callable): + orig_reduce_scatter = dist.reduce_scatter_tensor + dist.barrier() + dist.reduce_scatter_tensor = new_reduce_scatter_tensor + try: + yield + finally: + dist.barrier() + dist.reduce_scatter_tensor = orig_reduce_scatter + + +@contextlib.contextmanager +def patch_all_reduce(new_all_reduce: Callable): + orig_all_reduce = dist.all_reduce + dist.barrier() + dist.all_reduce = new_all_reduce + try: + yield + finally: + dist.barrier() + dist.all_reduce = orig_all_reduce + + +@no_type_check +@contextlib.contextmanager +def patch_unshard(new_unshard: Callable): + orig_unshard = FSDPParamGroup.unshard + dist.barrier() + FSDPParamGroup.unshard = new_unshard + try: + yield + finally: + dist.barrier() + FSDPParamGroup.unshard = orig_unshard + + +@no_type_check +@contextlib.contextmanager +def patch_reshard(new_reshard: Callable): + orig_reshard = FSDPParamGroup.reshard + dist.barrier() + FSDPParamGroup.reshard = new_reshard + try: + yield + finally: + dist.barrier() + FSDPParamGroup.reshard = orig_reshard + + +@no_type_check +@contextlib.contextmanager +def patch_post_backward(new_post_backward: Callable): + orig_post_backward = FSDPParamGroup.post_backward + dist.barrier() + FSDPParamGroup.post_backward = new_post_backward + try: + yield + finally: + dist.barrier() + 
FSDPParamGroup.post_backward = orig_post_backward + + +@no_type_check +@contextlib.contextmanager +def patch_register_post_backward_hook_backward(new_backward: Callable): + orig_backward = RegisterPostBackwardFunction.backward + dist.barrier() + RegisterPostBackwardFunction.backward = new_backward + try: + yield + finally: + dist.barrier() + RegisterPostBackwardFunction.backward = orig_backward + + +def reduce_scatter_with_assert( + cls, + orig_reduce_scatter: Callable, + assert_fn: Callable, # `assert_fn(output: Tensor)` + *args: Any, + **kwargs: Any, +): + if len(args) > 0: + output = args[0] + elif "output" in kwargs: + output = kwargs["output"] + else: + raise AssertionError( + f"Cannot get reduce-scatter output from\nargs: {args}\nkwargs: {kwargs}" + ) + assert_fn(output) + return orig_reduce_scatter(*args, **kwargs) + + +def check_sharded_parity( + cls, # unit test class + replicated_module: nn.Module, + sharded_module: nn.Module, + prefixes_to_ignore: Tuple[str, ...] = (), +): + for (replicated_name, replicated_param), (sharded_name, sharded_param) in zip( + replicated_module.named_parameters(), sharded_module.named_parameters() + ): + clean_sharded_name = sharded_name + for prefix in prefixes_to_ignore: + clean_sharded_name = clean_sharded_name.replace(prefix, "") + cls.assertEqual(replicated_name, clean_sharded_name) + cls.assertIsInstance(sharded_param, DTensor) + assert isinstance(sharded_param, DTensor) # mypy + mesh, placements = sharded_param.device_mesh, sharded_param.placements + if tuple(placements) == (Shard(0), Shard(0)): + raise AssertionError( + "FSDP's (Shard(0), Shard(0)) layout differs from distribute_tensor(), " + "so we cannot check for equality using it" + ) + sharded_ref_param = distribute_tensor(replicated_param, mesh, placements) + cls.assertEqual(sharded_param.to_local(), sharded_ref_param.to_local()) + if replicated_param.grad is None: + cls.assertIsNone(sharded_param.grad) + continue + cls.assertIsNotNone(sharded_param.grad) + 
sharded_ref_grad = distribute_tensor(replicated_param.grad, mesh, placements) + cls.assertIsInstance(sharded_param.grad, DTensor) + assert isinstance(sharded_param.grad, DTensor) # mypy + cls.assertEqual(sharded_param.grad.to_local(), sharded_ref_grad.to_local()) + + +class FSDPTestMultiThread(MultiThreadedTestCase): + @property + def world_size(self): + return torch.cuda.device_count() if torch.cuda.is_available() else 4 + + def setUp(self): + super().setUp() + self._spawn_threads() + + def run_subtests(self, *args, **kwargs): + return run_subtests(self, *args, **kwargs) + + def perThreadSetUp(self): + torch._dynamo.reset() + + def perThreadTearDown(self): + torch._dynamo.reset() + + +class FSDPTest(MultiProcessTestCase): + def setUp(self): + super().setUp() + # Set TORCH_NCCL_DESYNC_DEBUG=0 to disable the NCCL `workCleanupLoop()`, + # which can cause unit test flakiness: + # https://github.com/pytorch/pytorch/issues/90848 + os.environ["TORCH_NCCL_DESYNC_DEBUG"] = "0" + self._spawn_processes() + + @property + def world_size(self): + return min(torch.cuda.device_count(), 8) if torch.cuda.is_available() else 4 + + @property + def process_group(self): + return dist.distributed_c10d._get_default_group() + + @property + def init_method(self): + return f"{FILE_SCHEMA}{self.file_name}" + + def _check_cpu_offload(self, fsdp_model, cpu_offload): + self.assertEqual(cpu_offload, fsdp_model.cpu_offload) + + def _check_backward_prefetch(self, fsdp_model, backward_prefetch): + self.assertEqual(backward_prefetch, fsdp_model.backward_prefetch) + + def _check_forward_prefetch(self, fsdp_model, forward_prefetch): + self.assertEqual(forward_prefetch, fsdp_model.forward_prefetch) + + def run_subtests(self, *args, **kwargs): + return run_subtests(self, *args, **kwargs) + + @classmethod + def _run(cls, rank, test_name, file_name, pipe, **kwargs): + self = cls(test_name) + self.rank = rank + self.file_name = file_name + fake_pg = kwargs.get("fake_pg", False) + + print(f"dist init 
r={self.rank}, world={self.world_size}") + + # Specify gloo backend to make 'init_process_group()' succeed, + # Actual tests will be skipped if there is no enough GPUs. + backend = "nccl" if torch.cuda.is_available() else "gloo" + + try: + if fake_pg: + store = torch.testing._internal.distributed.fake_pg.FakeStore() + dist.init_process_group( + backend="fake", + world_size=self.world_size, + rank=rank, + store=store, + ) + else: + dist.init_process_group( + init_method=self.init_method, + backend=backend, + world_size=int(self.world_size), + rank=self.rank, + ) + except RuntimeError as e: + if "recompile" in e.args[0]: + sys.exit(TEST_SKIPS["backend_unavailable"].exit_code) + + raise + + device_ids = None + if torch.cuda.is_available() and torch.cuda.device_count(): + device_id = self.rank % torch.cuda.device_count() + torch.cuda.set_device(device_id) + device_ids = [device_id] + + # Execute barrier prior to running test to ensure that every process + # has finished initialization and that the following test + # immediately exiting due to a skip doesn't cause flakiness. 
+ dist.barrier(device_ids=device_ids) + + torch._dynamo.reset() + self.run_test(test_name, pipe) + torch._dynamo.reset() + + dist.barrier(device_ids=device_ids) + + dist.destroy_process_group() + + def _train_for_several_steps( + self, + model: nn.Module, + num_steps: int, + autocast: bool, + lr: float = 0.01, + fsdp_cpu_offload: Optional[CPUOffload] = None, + save_model: bool = False, + mixed_precision: Optional[MixedPrecision] = None, + enable_sharded_grad_scaler: bool = False, + use_pure_fp16: bool = False, + sharded_grad_scaler_kwargs: Optional[Dict[str, Any]] = None, + ): + cpu_offload_params = fsdp_cpu_offload and fsdp_cpu_offload.offload_params + + model_device = next(model.parameters()).device + if sharded_grad_scaler_kwargs is None: + sharded_grad_scaler_kwargs = {} + sharded_grad_scaler = ShardedGradScaler( + enabled=enable_sharded_grad_scaler, **sharded_grad_scaler_kwargs + ) + # use SGD with momentum instead of Adam, since Adam is scale invariant + # and this makes it bad for tests + optim = torch.optim.SGD(model.parameters(), lr=lr, momentum=0.9) + for _ in range(num_steps): + optim.zero_grad() + with torch.amp.autocast("cuda", enabled=autocast): + # Inputs always cuda regardless of cpu offloading, or model.device + input = model.module.get_input(torch.device("cuda")) + if use_pure_fp16 or (mixed_precision and not isinstance(model, FSDP)): + if isinstance(input, torch.Tensor): + input = input.half() + else: + input = tuple(x.half() for x in input) + output = model(*input) + # Post-forward, if CPU offloading model param should be on CPU. 
+ if ( + cpu_offload_params + and isinstance(model, FSDP) + # If not resharding after forward, the parameters are still + # exposed as unsharded views into the GPU flat parameter + and model.sharding_strategy + not in NO_RESHARD_AFTER_FORWARD_STRATEGIES + ): + for p in model.parameters(): + # Params should always be on CPU + self.assertEqual(p.device, torch.device("cpu")) + + loss = model.module.get_loss(input, output).to(model_device) + loss = sharded_grad_scaler.scale(loss) + + if not mixed_precision and not use_pure_fp16: + assert ( + loss.dtype == torch.float32 + ), "loss data type should be float32, as the original \ + parameter data type is float32." + else: + if use_pure_fp16: + self.assertEqual(loss.dtype, torch.float16) + # FSDP loss is fp16, DDP AMP loss is fp32 + elif isinstance(model, FSDP): + assert mixed_precision is not None # mypy + self.assertEqual(loss.dtype, mixed_precision.param_dtype) + else: + self.assertEqual(loss.dtype, torch.float32) + model.module.run_backward(loss) + # Post-backward, if CPU offloading model params should be on CPU. + if cpu_offload_params and isinstance(model, FSDP): + for p in model.parameters(): + # Params should always be on CPU + self.assertEqual(p.device, torch.device("cpu")) + # Unscale the gradients and step + sharded_grad_scaler.step(optim) + # Update the scale factor + sharded_grad_scaler.update() + # if save_model, simulate save + load. + if save_model: + state_dict = {k: v.clone() for k, v in model.state_dict().items()} + # Zero params, if save/load state_dict did not work properly, this + # would break the parity test with DDP. 
+ _zero_model(model) + model.load_state_dict(state_dict) + + if isinstance(model, FSDP): + model._assert_state(TrainingState.IDLE) + return loss.detach() # type: ignore[possibly-undefined] + + def _test_fsdp_parity( + self, + model_class: Type[FSDPTestModel], + fsdp_init_mode: FSDPInitMode, + cuda_init_mode: CUDAInitMode, + ref_init_fn: Optional[Callable] = None, + num_iters: int = 2, + save_model: bool = True, + cpu_offload: CPUOffload = CPUOffload(), + backward_prefetch: Optional[BackwardPrefetch] = None, + sharding_strategy: Optional[ShardingStrategy] = None, + mixed_precision: Optional[MixedPrecision] = None, + forward_prefetch: bool = False, + use_orig_params: bool = False, + enable_sharded_grad_scaler: bool = False, + use_pure_fp16: bool = False, + init_kwargs: Optional[Dict[str, Any]] = None, + sharded_grad_scaler_kwargs: Optional[Dict[str, Any]] = None, + **fsdp_kwargs, + ): + """ + Tests FSDP training against a reference, which defaults to DDP but + may be customized with ``ref_init_fn``. + + Args: + model_class (Type[FSDPTestModel]): A model class that inherits from + ``FSDPTestModel``, which defines the expected interface. + fsdp_init_mode (FSDPInitMode): The mode to initialize the + FSDP-wrapped model. This should not be ``NO_FSDP``. + ref_init_fn (Optional[Callable]): A callable to invoke that wraps a + non-wrapped model to construct the reference model, where this + wrapper should provide data parallel semantics. If ``None``, + then the callable defaults to the DDP constructor. 
+ """ + assert ( + fsdp_init_mode != FSDPInitMode.NO_FSDP + ), "Expects an FSDP init mode that wraps with FSDP" + if init_kwargs is None: + init_kwargs = {} + lr = 1e-2 + rank = self.process_group.rank() + # Establish reference behavior with DDP + model = model_class.init( + self.process_group, + FSDPInitMode.NO_FSDP, + CUDAInitMode.CUDA_BEFORE, + deterministic=True, + **init_kwargs, + ) + if ref_init_fn is None: + ref_model = DDP(model, device_ids=[rank], output_device=rank) + else: + ref_model = ref_init_fn(model) + if use_pure_fp16: + ref_model = ref_model.half() + ref_loss = self._train_for_several_steps( + ref_model, + num_iters, + autocast=mixed_precision is not None, + lr=lr, + fsdp_cpu_offload=cpu_offload, + mixed_precision=mixed_precision, + enable_sharded_grad_scaler=enable_sharded_grad_scaler, + use_pure_fp16=use_pure_fp16, + sharded_grad_scaler_kwargs=sharded_grad_scaler_kwargs, + ) + ddp_params = list(ref_model.parameters()) + # Check against FSDP behavior + fsdp_kwargs.update( + { + "cpu_offload": cpu_offload, + "backward_prefetch": backward_prefetch, + "sharding_strategy": sharding_strategy, + "mixed_precision": mixed_precision, + "forward_prefetch": forward_prefetch, + "use_orig_params": use_orig_params, + } + ) + try: + fsdp_model = model_class.init( + self.process_group, + fsdp_init_mode, + cuda_init_mode, + fsdp_kwargs, + deterministic=True, + **init_kwargs, + ) + except Exception as e: + raise ValueError(f"Initializing {model_class} raised error {str(e)}") from e + if not isinstance(fsdp_model, FSDP): + # Enforce that we wrap with top-level FSDP since we are comparing + # assuming a data parallel reference and some test models may not + # do so in their `init()` method + fsdp_model = FSDP(fsdp_model, self.process_group, **fsdp_kwargs) + if use_pure_fp16: + # Change the model parameter dtype after FSDP initialization + fsdp_model = fsdp_model.half() + if cuda_init_mode == CUDAInitMode.CUDA_AFTER: + fsdp_model = fsdp_model.cuda() + offload_params 
= cpu_offload is not None and cpu_offload.offload_params + # Offloading parameters with `CUDA_AFTER` should raise an error during + # lazy initialization due to the parameter devices not being CPU; + # otherwise, all parameter devices should be CPU + expects_device_error = ( + offload_params and cuda_init_mode == CUDAInitMode.CUDA_AFTER + ) + expects_cpu_device = ( + offload_params and cuda_init_mode != CUDAInitMode.CUDA_AFTER + ) + if expects_cpu_device: + cpu_device = torch.device("cpu") + for param in fsdp_model.parameters(): + self.assertEqual(param.device, cpu_device) + context = ( + self.assertRaisesRegex( + RuntimeError, + "An FSDP-managed module with parameter CPU offloading enabled " + "has parameters on cuda", + ) + if expects_device_error + else nullcontext() + ) + with context: + fsdp_loss = self._train_for_several_steps( + fsdp_model, + num_iters, + autocast=False, + lr=lr, + fsdp_cpu_offload=cpu_offload, + save_model=save_model, + mixed_precision=mixed_precision, + enable_sharded_grad_scaler=enable_sharded_grad_scaler, + use_pure_fp16=use_pure_fp16, + sharded_grad_scaler_kwargs=sharded_grad_scaler_kwargs, + ) + # No need to check for parameter and loss parity if expecting an error + if expects_device_error: + return + # Check parameter devices are CPU if offloading to CPU before calling + # `get_full_params()`, which will cast the parameters to FP32 + if offload_params: + cpu_device = torch.device("cpu") + for param in fsdp_model.parameters(): + self.assertEqual(param.device, cpu_device) + fsdp_loss = fsdp_loss.cuda() + fsdp_unsharded_params = get_full_params(fsdp_model) + # Do not check dtype since the reference DDP loss may not be the same + # dtype as the FSDP loss in the case of mixed precision + torch.testing.assert_close(ref_loss, fsdp_loss, check_dtype=False) + # Do not check for parameter parity if using mixed precision since (1) + # the DDP parameters are in FP16 (from `half()`) while the FSDP + # parameters are in FP32 (from 
`summon_full_params()`) and (2) DDP runs + # the optimizer in FP16 while FSDP runs it in FP32 + # TODO: Disable checking the parameters for pure FP16 due to floating + # point inaccuracy. Note that this means that the backward pass is not + # checked: https://github.com/pytorch/pytorch/issues/90784 + if mixed_precision is None and not use_pure_fp16: + self.assertEqual( + ddp_params, + fsdp_unsharded_params, + exact_device=True, + msg="FSDP did not match DDP", + ) + + +def test_compiled_fsdp(compile_compute_on_module: Optional[type] = None): + def fully_shard_with_compiled_compute(*args, **kwargs): + torch.distributed._composable.fsdp.fully_shard(*args, **kwargs) # type: ignore[operator] + if compile_compute_on_module is None or isinstance( + args[0], compile_compute_on_module + ): + args[0].compile() + + class FullyShardMode(Enum): + EAGER = auto() + COMPILED_COMPUTE = auto() + + def decorator(func): + @wraps(func) + def wrapper(*args, **kwargs): + original_fully_shard = torch.distributed._composable.fsdp.fully_shard + for mode in FullyShardMode: + if mode != FullyShardMode.EAGER and not has_triton(): + warnings.warn("Inductor on GPU needs Triton and recent GPU arch") + continue + # barrier to ensure thread reading the same value + original_skip_fsdp_hooks = torch._dynamo.config.skip_fsdp_hooks + original_compile_threads = torch._inductor.config.compile_threads + torch.distributed.barrier() + + if mode == FullyShardMode.EAGER: + fully_shard_patch = original_fully_shard + elif mode == FullyShardMode.COMPILED_COMPUTE: + torch._dynamo.config.skip_fsdp_hooks = True + torch._inductor.config.compile_threads = 1 + fully_shard_patch = fully_shard_with_compiled_compute # type: ignore[assignment] + else: + raise NotImplementedError( + f"Need to implement FullyShardMode={mode}" + ) + + # fully_shard is imported as a global + # through `from ... 
import fully_shard` + func.__globals__[original_fully_shard.__name__] = fully_shard_patch + func(*args, **kwargs) + # other threads use patched func before this thread restores + torch.distributed.barrier() + func.__globals__[original_fully_shard.__name__] = original_fully_shard + torch._dynamo.config.skip_fsdp_hooks = original_skip_fsdp_hooks + torch._inductor.config.compile_threads = original_compile_threads + + return wrapper + + return decorator + + +class SkipModule(nn.Module): + def __init__(self) -> None: + super().__init__() + self.lin = nn.Linear(10, 10, bias=False) + + def forward(self, x): + return self.lin(x) + + +class NestedLinear(nn.Module): + def __init__(self, fsdp_wrap): + super().__init__() + if fsdp_wrap: + self.nested_linear = wrap(nn.Linear(10, 10, bias=False).cuda()) + else: + self.nested_linear = nn.Linear(10, 10, bias=False).cuda() + + def forward(self, x): + return self.nested_linear(x) + + +class SkipModel(nn.Module): + def __init__(self, double_nest): + super().__init__() + self.linear = nn.Linear(10, 10, bias=False).cuda() + self.linear_skip = SkipModule().cuda() + self.nested_linear = wrap(NestedLinear(fsdp_wrap=double_nest)) + + def forward(self, x): + x = self.linear(x) + x = self.linear_skip(x) + x = self.nested_linear(x) + return x diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/common_jit.py b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/common_jit.py new file mode 100644 index 0000000000000000000000000000000000000000..8a676c7e16c780b52f156cece3a7b7347c6f465b --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/common_jit.py @@ -0,0 +1,323 @@ +# mypy: ignore-errors + +# Torch +import torch +import torch.cuda +import torch.jit +import torch.jit._logging +import torch.jit.frontend +import torch.jit.quantized + +# Testing utils +from torch.testing._internal.common_dtype import floating_and_complex_types_and +from torch.testing._internal.common_utils 
import TestCase, \ + freeze_rng_state, TemporaryFileName, enable_profiling_mode_for_profiling_tests, is_iterable_of_tensors +from torch.testing._internal.common_utils import enable_profiling_mode # noqa: F401 + +# Standard library +from itertools import chain +from typing import List, Union +from torch._C import TensorType + +import io + +def check_output_types(self, func, ref_outputs, args, kwargs): + graph = getattr(func, 'last_graph', None) + types = [o.type() for o in graph.outputs()] + self.assertTrue(len(types) == 1) + t = types[0] + torch._C._jit_assert_is_instance(ref_outputs, t) + +# Test names in this set are only checked for a single derivative +nn_functional_single_grad = frozenset('test_nn_' + name for name in [ + 'pdist', + 'multilabel_margin_loss', + 'max_unpool3d', + 'multi_margin_loss', + 'binary_cross_entropy', + 'binary_cross_entropy_size_average', + 'ctc_loss', + 'grid_sample', +]) + +def check_against_reference(self, func, reference_func, output_func, args, kwargs=None, + allow_unused=True, check_types=True, no_grad=False, no_gradgrad=False): + """Verifies a function performs identically to some reference implementation. + + Commonly, this is used to verify that a JIT implementation + (output_func) matches the behavior of the eager implementation + (reference_func). 
+ """ + kwargs = kwargs if kwargs else {} + + def allSum(vs): + if isinstance(vs, torch.Tensor): + vs = (vs,) + return sum((i + 1) * v.sum().abs() if v.dtype.is_complex else (i + 1) * v.sum() + for i, v in enumerate(vs) + if v is not None and v.dtype in floating_and_complex_types_and(torch.half, torch.bfloat16)) + + def clone_tensor(t, preserve_requires_grad): + require_grad = preserve_requires_grad and t.requires_grad + return t.detach().clone().requires_grad_(require_grad) + + def clone_inputs(preserve_requires_grad: bool): + inputs: List[Union[torch.Tensor, List[torch.Tensor]]] = [] + + for arg in args: + if isinstance(arg, torch.Tensor): + inputs.append(clone_tensor(arg, preserve_requires_grad)) + elif is_iterable_of_tensors(arg): + inputs.append([clone_tensor(t, preserve_requires_grad) for t in arg]) + else: + inputs.append(arg) + + return inputs + + # Returns tensors in args that requires_grad, including tensors in TensorList args + def get_recording_tensors(args): + recording_tensors: List[torch.Tensor] = [] + + for arg in args: + if isinstance(arg, torch.Tensor) and arg.requires_grad: + recording_tensors.append(arg) + elif is_iterable_of_tensors(arg): + recording_tensors.extend(filter(lambda t: t.requires_grad, arg)) + + return recording_tensors + + # test no gradients case + nograd_inputs = clone_inputs(preserve_requires_grad=False) + outputs = self.runAndSaveRNG(reference_func, nograd_inputs, kwargs) + with enable_profiling_mode_for_profiling_tests(): + outputs_test = self.runAndSaveRNG(func, nograd_inputs, kwargs) + self.assertEqual(outputs, outputs_test) + + if check_types: + check_output_types(self, func, outputs_test, nograd_inputs, kwargs) + + if no_grad: + # skip grad tests + return + + with enable_profiling_mode_for_profiling_tests(): + # test single grad case + recording_inputs = clone_inputs(preserve_requires_grad=True) + recording_tensors = get_recording_tensors(recording_inputs) + outputs = output_func(self.runAndSaveRNG(reference_func, 
recording_inputs, kwargs)) + grads = torch.autograd.grad(allSum(outputs), recording_tensors, + allow_unused=allow_unused) + outputs_test = output_func(self.runAndSaveRNG(func, recording_inputs, kwargs)) + grads_test = torch.autograd.grad(allSum(outputs_test), recording_tensors, + allow_unused=allow_unused) + self.assertEqual(outputs, outputs_test) + self.assertEqual(grads, grads_test) + # test the grad grad case + if self._testMethodName in nn_functional_single_grad or no_gradgrad: + return + + outputs = output_func(self.runAndSaveRNG(reference_func, recording_inputs, kwargs)) + l1 = allSum(outputs) + grads = torch.autograd.grad(l1, recording_tensors, create_graph=True, + allow_unused=allow_unused) + + l2 = (allSum(grads) * l1) + grads2 = torch.autograd.grad(l2, recording_tensors, allow_unused=allow_unused) + recording_inputs = clone_inputs(preserve_requires_grad=True) + recording_tensors = get_recording_tensors(recording_inputs) + outputs_test = output_func(self.runAndSaveRNG(func, recording_inputs, kwargs)) + l1_test = allSum(outputs_test) + grads_test = torch.autograd.grad( + l1_test, recording_tensors, create_graph=True, allow_unused=allow_unused) + + l2_test = (allSum(grads_test) * l1_test) + grads2_test = torch.autograd.grad(l2_test, recording_tensors, allow_unused=allow_unused) + + self.assertEqual(outputs, outputs_test) + self.assertEqual(grads, grads_test) + for g2, g2_test in zip(grads2, grads2_test): + if g2 is None and g2_test is None: + continue + self.assertEqual(g2, g2_test, atol=5e-4, rtol=1e-4) + +class JitCommonTestCase(TestCase): + def createFunctionFromGraph(self, trace): + graph = trace if isinstance(trace, torch._C.Graph) else trace.graph() + return torch._C._create_function_from_graph("forward", graph) + + def assertExportImport(self, trace, inputs): + m = self.createFunctionFromGraph(trace) + self.assertExportImportModule(m, inputs) + + def assertExportImportModule(self, m, inputs): + m_import = self.getExportImportCopy(m) + a = 
self.runAndSaveRNG(m, inputs) + b = self.runAndSaveRNG(m_import, inputs) + self.assertEqual(a, b, "Results of original model and " + "exported/imported version of model differed") + + def runAndSaveRNG(self, func, inputs, kwargs=None): + kwargs = kwargs if kwargs else {} + with freeze_rng_state(): + results = func(*inputs, **kwargs) + return results + + def getExportImportCopy(self, m, also_test_file=True, map_location=None): + buffer = io.BytesIO() + torch.jit.save(m, buffer) + buffer.seek(0) + imported = torch.jit.load(buffer, map_location=map_location) + + if not also_test_file: + return imported + + with TemporaryFileName() as fname: + torch.jit.save(imported, fname) + return torch.jit.load(fname, map_location=map_location) + + def autoDiffErrorMessage(self, should_autodiff_node, nodes_not_in_diff_graph, + fusion_nodes_not_found, non_fusible_nodes_being_fused, + fusion_nodes_found, nodes_in_diff_graph): + err_msg = "\nFailure in testing nodes' autodifferentiation. " + if should_autodiff_node: + err_msg += "One or more nodes were expected to be autodiffed, " \ + "but were not found in specified fusible/nonfusible " \ + "DifferentiableGraph groups. 
\nSpecifically:" + # The node is intended to appear in a differentiable graph but doesn't + diff_nodes_missing = [] + # The node is intended to appear in a differentiable graph + # outside of a fusion group but instead is in a fusion group + diff_nodes_in_fusion = [] + # The node is intended to appear in a fusion group but doesn't + fusion_nodes_missing = [] + # The node is intended to appear in a fusion group but instead + # is just in an outer differentiable graph + fusion_nodes_in_diff = [] + for node in nodes_not_in_diff_graph: + if node in non_fusible_nodes_being_fused: + diff_nodes_in_fusion.append(node) + else: + diff_nodes_missing.append(node) + for node in fusion_nodes_not_found: + if node in nodes_in_diff_graph: + fusion_nodes_in_diff.append(node) + else: + fusion_nodes_missing.append(node) + if len(diff_nodes_missing) > 0: + err_msg += f"\n {diff_nodes_missing} were not in one of the " \ + "DifferentiableGraphs when they were expected to be. " \ + "Did you intend for these nodes to be autodiffed? " \ + "If not, remove them from the list of nonfusible nodes." + if len(diff_nodes_in_fusion) > 0: + err_msg += f"\n {diff_nodes_in_fusion} were found in one of the FusionGroups " \ + "when they were expected to be just in a DifferentiableGraph. If it was " \ + "intended for these nodes to be in FusionGroups, reclassify these nodes as " \ + "fusible nodes. If these nodes were not intended to be fused, your " \ + "autodifferentiation logic might be wrong." + if len(fusion_nodes_missing) > 0: + err_msg += f"\n {fusion_nodes_missing} were not in one of the FusionGroups " \ + "of the DifferentiableGraphs when they were expected to be. " \ + "They were also not found in an outer DifferentiableGraph. Did you " \ + "intend for these nodes to be autodifferentiated? If not, you should " \ + "remove these nodes from the test's fusible nodes. Otherwise your " \ + "autodifferentiation logic might be wrong." 
+ if len(fusion_nodes_in_diff) > 0: + err_msg += f"\n {fusion_nodes_in_diff} were not in one of the FusionGroups " \ + "of the DifferentiableGraphs when they were expected to be, " \ + "instead they were found just in an outer DifferentiableGraph. " \ + "Did you intend for these nodes to be fused? If not, you should " \ + "move these nodes into the test's nonfusible nodes. Otherwise your " \ + "autodifferentiation logic might be wrong." + else: + err_msg += "One or more nodes were not expected to be autodiffed " \ + "but were found in a DifferentiableGraph or in a FusionGroup " \ + "of a DifferentiableGraph. Did you intend for these nodes to be " \ + "autodiffed? If so, change this test to expect autodifferentiation. " \ + "\nSpecifically:" + if len(fusion_nodes_found) > 0: + err_msg += f"\n {fusion_nodes_found} were not expected to be in " \ + "one of the DifferentiableGraphs, but appeared in a FusionGroup " \ + "of a DifferentiableGraph. " + if len(nodes_in_diff_graph) > 0: + err_msg += f"\n {nodes_in_diff_graph} were not expected to " \ + "be in one of the DifferentiableGraphs but were." + return err_msg + + def assertAutodiffNode(self, graph, should_autodiff_node, nonfusible_nodes, fusible_nodes): + diff_nodes = graph.findAllNodes('prim::DifferentiableGraph') + diff_subgraphs = [node.g('Subgraph') for node in diff_nodes] + + # Note: currently no tests have fusible_nodes + fusion_nodes = list(chain.from_iterable([g.findAllNodes('prim::FusionGroup') for g in diff_subgraphs])) + fusion_subgraphs = [node.g('Subgraph') for node in fusion_nodes] + + # For any non-fusible node, it must show up in one of the DifferentiableGraphs. 
+ nodes_in_diff_graph = [] + nodes_not_in_diff_graph = [] + non_fusible_nodes_being_fused = [] + for node in nonfusible_nodes: + if any(g.findNode(node) is not None for g in diff_subgraphs): + nodes_in_diff_graph.append(node) + else: + nodes_not_in_diff_graph.append(node) + if any(g.findNode(node) is not None for g in fusion_subgraphs): + non_fusible_nodes_being_fused.append(node) + found_all_nonfusible_nodes = len(nodes_in_diff_graph) == len(nonfusible_nodes) + + # For any fusible node, it must show up in one of the FusionGroups in one of the DifferentiableGraphs. + fusion_nodes_found = [] + fusion_nodes_not_found = [] + for node in fusible_nodes: + if any(g.findNode(node) is not None for g in fusion_subgraphs): + fusion_nodes_found.append(node) + else: + fusion_nodes_not_found.append(node) + found_all_fusible_nodes = len(fusion_nodes_found) == len(fusible_nodes) + + if should_autodiff_node is not None: + err_msg = self.autoDiffErrorMessage(should_autodiff_node, + nodes_not_in_diff_graph, + fusion_nodes_not_found, + non_fusible_nodes_being_fused, + fusion_nodes_found, + nodes_in_diff_graph) + self.assertEqual(should_autodiff_node, + found_all_nonfusible_nodes and found_all_fusible_nodes, err_msg) + + def checkShapeAnalysis(self, out_sizes: Union[List[int], List[List[int]]], + traced_graph, assert_propagation, constant_prop=True): + # repropagte input shapes provided by tracing, + prev_symbolic_shapes_test_enabled = torch._C._jit_symbolic_shapes_test_mode_enabled() + for enable_test_mode in [True, False]: + # here we are testing allowing/disallowing substituting in complete shapes as constants, + # disallowing constants helps stress test partial eval and substitution pipeline + torch._C._jit_set_symbolic_shapes_test_mode(enable_test_mode) + torch._C._jit_erase_non_input_shape_information(traced_graph) + if constant_prop: + torch._C._jit_pass_constant_propagation(traced_graph) + torch._C._jit_pass_propagate_shapes_on_graph(traced_graph) + # Add sizes to default 
tensor type to avoid checking something out of scope + # and difficulties with tracer leaving in other parts of tensor type + output = next(traced_graph.outputs()).type() + + def test_type(type, actual_size): + sizes = type.symbolic_sizes() + out_type = TensorType.get().with_sizes(sizes) + actual_type = TensorType.get().with_sizes(actual_size) + + # always check actual shape is a subtype of the output + self.assertTrue(actual_type.isSubtypeOf(out_type)) + + # and then if assertion flag is provided, check shape analysis + # is successful + if assert_propagation: + self.assertEqual(out_type.sizes(), actual_size) + + if output.isSubtypeOf(torch._C.TensorType.get()): + test_type(output, out_sizes) + else: + tuple_elements = output.elements() + for i in range(len(tuple_elements)): + test_type(tuple_elements[i], out_sizes[i]) + + torch._C._jit_set_symbolic_shapes_test_mode(prev_symbolic_shapes_test_enabled) diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/common_methods_invocations.py b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/common_methods_invocations.py new file mode 100644 index 0000000000000000000000000000000000000000..7eb81572eeed0a04bc5d7c8a738d6466ffe86834 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/common_methods_invocations.py @@ -0,0 +1,24487 @@ +# mypy: ignore-errors + +from functools import wraps, partial +from itertools import product, chain, islice +import itertools +import functools +import copy +import operator +import random +import unittest +import math +import enum + +import torch +import numpy as np +from torch import inf, nan + +from typing import Any, Dict, List, Tuple, Union, Sequence +from torch.testing import make_tensor +from torch.testing._internal.common_dtype import ( + _dispatch_dtypes, floating_types, floating_types_and, complex_types, floating_and_complex_types, + floating_and_complex_types_and, all_types_and_complex_and, all_types_and, 
all_types_and_complex, integral_types_and, + all_types, empty_types, complex_types_and, integral_types, custom_types, +) +from torch.testing._internal.common_device_type import \ + (onlyCPU, onlyCUDA, onlyNativeDeviceTypes, disablecuDNN, skipCUDAIfNoMagma, skipCUDAIfNoMagmaAndNoCusolver, + skipCUDAIfNoCusolver, skipCPUIfNoLapack, skipCPUIfNoFFT, skipCUDAIf, precisionOverride, + skipCPUIfNoMklSparse, + toleranceOverride, tol) +from torch.testing._internal.common_cuda import ( + PLATFORM_SUPPORTS_FLASH_ATTENTION, PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, + SM53OrLater, SM80OrLater, SM90OrLater, with_tf32_off, TEST_CUDNN, _get_torch_cuda_version, + _get_torch_rocm_version, +) +from torch.testing._internal.common_utils import ( + make_fullrank_matrices_with_distinct_singular_values, + TEST_WITH_ROCM, IS_WINDOWS, IS_MACOS, TEST_SCIPY, + torch_to_numpy_dtype_dict, numpy_to_torch_dtype, TEST_WITH_ASAN, + GRADCHECK_NONDET_TOL, slowTest, TEST_WITH_SLOW, + TEST_WITH_TORCHINDUCTOR +) +from torch.testing._utils import wrapper_set_seed + +import torch._refs as refs # noqa: F401 +import torch._refs.nn.functional +import torch._refs.special +import torch._refs.linalg +import torch._prims as prims # noqa: F401 +from torch.utils import _pytree as pytree + + +from packaging import version + +from torch.testing._internal.opinfo.core import ( # noqa: F401 + L, + M, + S, + XS, + _NOTHING, + _getattr_qual, + DecorateInfo, + SampleInput, + ErrorInput, + AliasInfo, + NumericsFilter, + OpInfo, + _generate_reduction_inputs, + _generate_reduction_kwargs, + sample_inputs_reduction, + ReductionOpInfo, + reference_inputs_elementwise_binary, + make_error_inputs_elementwise_binary, + generate_elementwise_binary_tensors, + generate_elementwise_binary_arbitrarily_strided_tensors, + generate_elementwise_binary_small_value_tensors, + generate_elementwise_binary_large_value_tensors, + generate_elementwise_binary_extremal_value_tensors, + generate_elementwise_binary_broadcasting_tensors, + 
generate_elementwise_binary_with_scalar_samples, + generate_elementwise_binary_with_scalar_and_type_promotion_samples, + generate_elementwise_binary_noncontiguous_tensors, + sample_inputs_elementwise_binary, + BinaryUfuncInfo, + sample_inputs_elementwise_unary, + generate_elementwise_unary_tensors, + generate_elementwise_unary_small_value_tensors, + generate_elementwise_unary_large_value_tensors, + generate_elementwise_unary_extremal_value_tensors, + reference_inputs_elementwise_unary, + UnaryUfuncInfo, + sample_inputs_spectral_ops, + SpectralFuncType, + SpectralFuncInfo, + ShapeFuncInfo, + sample_inputs_foreach, + ForeachFuncInfo, + gradcheck_wrapper_hermitian_input, + gradcheck_wrapper_triangular_input, + gradcheck_wrapper_triangular_input_real_positive_diagonal, + gradcheck_wrapper_masked_operation, + gradcheck_wrapper_masked_pointwise_operation, + clone_sample, +) +from torch.testing._internal.opinfo.refs import ( # NOQA: F401 + _find_referenced_opinfo, + _inherit_constructor_args, + PythonRefInfo, + ReductionPythonRefInfo, + ElementwiseUnaryPythonRefInfo, + ElementwiseBinaryPythonRefInfo, +) +from torch.testing._internal.opinfo.utils import ( + np_unary_ufunc_integer_promotion_wrapper, + reference_reduction_numpy, + prod_numpy +) +from torch.testing._internal import opinfo +from torch.testing._internal.opinfo.definitions.linalg import ( + sample_inputs_linalg_cholesky, + sample_inputs_linalg_cholesky_inverse, + sample_inputs_cross, + sample_inputs_linalg_qr_geqrf, + sample_inputs_linalg_invertible, + sample_inputs_lu_solve, + sample_inputs_legacy_solve, + sample_inputs_svd, + sample_inputs_linalg_det_logdet_slogdet, + sample_inputs_linalg_lu, + sample_inputs_diagonal_diag_embed, + error_inputs_diagonal_diag_embed, +) +from torch.testing._internal.opinfo.definitions.special import ( + sample_inputs_i0_i1, + sample_inputs_polygamma, + reference_polygamma, +) +from torch.testing._internal.opinfo.definitions._masked import ( + sample_inputs_softmax_variant, +) 
+from torch.testing._internal.opinfo.definitions.sparse import ( + error_inputs_sparse_like_fns, + sample_inputs_sparse_like_fns, + error_inputs_sparse_mul, + sample_inputs_sparse_mul, + error_inputs_sparse_reduction_sum, + sample_inputs_sparse_reduction_sum +) + +if TEST_SCIPY: + from scipy import stats + import scipy.spatial + import scipy.special + + +# test if a tensor is close to an integer +def close_to_int(x, eps=0.1): + if x.is_complex(): + y = torch.abs(torch.view_as_complex(torch.frac(torch.view_as_real(x)))) + else: + y = torch.abs(torch.frac(x)) + return (y < eps) | (y > (1 - eps)) + + +def sample_inputs_slice(op_info, device, dtype, requires_grad, **kwargs): + + make_input = partial(make_tensor, device=device, dtype=dtype, + low=None, high=None, requires_grad=requires_grad) + + yield SampleInput(make_input(3), 0) + + yield SampleInput(make_input(20, 30, 40), dim=1, start=1, end=-2) + + yield SampleInput(make_input(20, 30, 40), dim=1, start=1, end=-2, step=3) + + yield SampleInput(make_input(20, 30, 40), dim=0, start=-10, end=-2, step=2) + + +def sample_inputs_tensor_split(op_info, device, dtype, requires_grad, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, + low=None, high=None, requires_grad=requires_grad) + + args_cases = ( + # Cases with tensor indices. + (torch.tensor([1, 2, 3]),), + (torch.tensor(1),), + (torch.tensor([1, 2, 3]), 1), + (torch.tensor([1, 4, 2, 5, 3, 6])[::2], 1), + # Cases with list of indices. + ((2, 4),), + ((2, 4), 1), + ((2, 4), -1), + # Cases with integer section. 
+ (3,), + (3, 1), + (3, -1), + ) + + for args in args_cases: + yield SampleInput(make_input((S, S, S)), args=args) + + +def sample_inputs_hsplit(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, + low=None, high=None, requires_grad=requires_grad) + yield SampleInput(make_arg(6), 2) + yield SampleInput(make_arg(S, S, S), [1, 2, 3]) + +def sample_inputs_vsplit(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, + low=None, high=None, requires_grad=requires_grad) + yield SampleInput(make_arg(6, S), 2) + yield SampleInput(make_arg(S, S, S), [1, 2, 3]) + +def sample_inputs_dsplit(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, + low=None, high=None, requires_grad=requires_grad) + yield SampleInput(make_arg(S, S, S), [1, 2, 3]) + yield SampleInput(make_arg(S, S, 6), 2) + +def error_inputs_hsplit(op_info, device, **kwargs): + make_arg = partial(make_tensor, dtype=torch.float32, device=device) + err_msg1 = ("torch.hsplit requires a tensor with at least 1 dimension, " + "but got a tensor with 0 dimensions!") + yield ErrorInput(SampleInput(make_arg(()), 0), error_regex=err_msg1) + + err_msg2 = (f"torch.hsplit attempted to split along dimension 1, " + f"but the size of the dimension {S} " + f"is not divisible by the split_size 0!") + yield ErrorInput(SampleInput(make_arg((S, S, S)), 0), error_regex=err_msg2) + + # Incorrect type for indices_or_section argument + err_msg3 = ("received an invalid combination of arguments.") + yield ErrorInput( + SampleInput(make_arg((S, S, S)), "abc"), + error_type=TypeError, error_regex=err_msg3) + +def error_inputs_vsplit(op_info, device, **kwargs): + make_arg = partial(make_tensor, dtype=torch.float32, device=device) + err_msg1 = ("torch.vsplit requires a tensor with at least 2 dimension, " + "but got a tensor with 1 dimensions!") + yield 
ErrorInput(SampleInput(make_arg(S), 0), error_regex=err_msg1) + + err_msg2 = (f"torch.vsplit attempted to split along dimension 0, " + f"but the size of the dimension {S} " + f"is not divisible by the split_size 0!") + yield ErrorInput(SampleInput(make_arg(S, S, S), 0), + error_regex=err_msg2) + + # Incorrect type for indices_or_section argument + err_msg3 = ("received an invalid combination of arguments.") + yield ErrorInput(SampleInput(make_arg(S, S, S), "abc"), + error_type=TypeError, error_regex=err_msg3) + +def error_inputs_dsplit(op_info, device, **kwargs): + make_arg = partial(make_tensor, dtype=torch.float32, device=device) + err_msg1 = ("torch.dsplit requires a tensor with at least 3 dimension, " + "but got a tensor with 1 dimensions!") + yield ErrorInput(SampleInput(make_arg(S), 0), error_regex=err_msg1) + + err_msg2 = (f"torch.dsplit attempted to split along dimension 2, " + f"but the size of the dimension {S} " + f"is not divisible by the split_size 0!") + yield ErrorInput(SampleInput(make_arg(S, S, S), 0), error_regex=err_msg2) + + +def sample_inputs_as_strided(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # input shape, output shape, output stride, output storage offset + test_cases = ( + ((1,), (1,), (1,), 0), + ((3, 3), (2, 2), (1, 2), 0), + ((3, 3), (2, 2), (1, 2), 1), + ((16,), (2, 2, 2, 2), (1, 1, 1, 1), 0), + ((16,), (2, 1, 1, 2), (1, 7, 7, 1), 0), + ) + + for input_shape, output_shape, stride, storage_offset in test_cases: + input_t = make_arg(input_shape) + kwargs = dict(storage_offset=storage_offset) + yield SampleInput(input_t, args=(output_shape, stride), kwargs=kwargs) + +def sample_inputs_as_strided_partial_views(op_info, device, dtype, requires_grad, **kwargs): + def make_arg(): + base = make_tensor((20,), device=device, dtype=dtype) + return base[5:15].requires_grad_(requires_grad) + + # as_strided on offset, partial views + yield 
SampleInput(make_arg(), (2, 2), (1, 2)) + yield SampleInput(make_arg(), (2, 2), (1, 2), storage_offset=0) + yield SampleInput(make_arg(), (2, 2), (1, 2), storage_offset=10) + +def sample_inputs_as_strided_scatter(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # input shape, output shape, output stride, output storage offset + test_cases = [ + ((1,), (), (), 0), + ((1,), (1,), (1,), 0), + ((3, 3), (2, 2), (1, 2), 0), + ((3, 3), (2, 2), (1, 2), 1), + ((3, 3), (2, 2), (2, 1), 0), + # Scatter to larger dimensions + ((16,), (2, 2, 2, 2), (8, 4, 2, 1), 0), + # Scatter to larger dimensions with strides inverted + ((16,), (2, 1, 1, 2), (1, 2, 4, 8), 0), + ] + + for input_shape, output_shape, stride, storage_offset in test_cases: + input_t = make_arg(input_shape) + input_src = make_arg(output_shape) + yield SampleInput(input_t, input_src, output_shape, stride, storage_offset=storage_offset) + + +def error_inputs_as_strided_scatter(op_info, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float32, requires_grad=False) + + # Create a small tensor and try to scatter it out of bounds + input_t = make_arg([4, 4]) + input_src = make_arg([2, 2]) + yield ErrorInput( + SampleInput(input_t, input_src, [2, 2], [200, 200], storage_offset=0), + error_regex="itemsize 4 requiring a storage size of 1604 are out of bounds for storage of size 64" + ) + + +def sample_inputs_combinations(op_info, device, dtype, requires_grad, **kwargs): + inputs = ( + (0,), + (0, 1), + (0, 1, 2, 3), + ) + + rvals = [1, 2, 4] + + products = product(inputs, rvals, [False, True]) + + for input_data, r, with_replacement in products: + input_t = torch.tensor(input_data, device=device, dtype=dtype, requires_grad=requires_grad) + yield SampleInput(input_t, r=r, with_replacement=with_replacement) + +def sample_inputs_cartesian_prod(op_info, device, dtype, requires_grad, **kwargs): + make_arg 
= partial(torch.tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # constructs 1-D tensors with varying number of elements + a = make_arg((0,)) + b = make_arg((0, 1)) + c = make_arg((0, 1, 2, 3)) + + # sample with only 1 tensor + yield SampleInput(a) + + # sample with 2 tensors + yield SampleInput(a, b) + + # sample with 3 tensors + yield SampleInput(a, b, c) + +def sample_inputs_cosine_similarity(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Ordered as input_shape, dict of dim and eps + cases: Tuple[tuple, dict] = ( # type: ignore[assignment] + ((S, S), {'dim': 1}), + ((S, 2), {'dim': -1}), + ((S,), {'dim': 0, 'eps': 0.5}), + ((), {'dim': 0}), + ((S, S, M), {'dim': 2}), + ((S, S), {}) + ) + + for input_shape, kwargs in cases: + yield SampleInput(make_arg(input_shape), args=(make_arg(input_shape),), kwargs=kwargs) + # Test for Broadcasting + yield SampleInput(make_arg((1, 2, 3)), args=(make_arg((2, 1, 3)),), kwargs={'dim': -1}) + yield SampleInput(make_arg((1, 2, 3)), args=(make_arg((2, 1, 3)),), kwargs={'dim': -2}) + yield SampleInput(make_arg((2, 3)), args=(make_arg((2, 1, 3)),), kwargs={'dim': -1}) + + +def sample_inputs_item(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=False) + + cases = ( + (), + (()), + (1), + ((1,)), + ) + + for shape in cases: + yield SampleInput(make_arg(shape)) + +def error_inputs_item(op, device, **kwargs): + make_arg = partial(make_tensor, dtype=torch.float32, device=device, requires_grad=False) + + cases = ( + (M), + ((S,)), + (S, S), + (S, M, L), + ) + + for shape in cases: + yield ErrorInput( + SampleInput(make_arg(shape)), error_type=RuntimeError, + error_regex="elements cannot be converted to Scalar") + + +def sample_inputs_batch_norm(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, 
dtype=dtype, requires_grad=requires_grad) + make_arg_without_requires_grad = partial(make_tensor, device=device, dtype=dtype, requires_grad=False) + + # Ordered as: input shape, kwargs for training, momentum, eps + cases: Tuple[Tuple[int], dict] = ( # type: ignore[assignment] + ((S, S, S), {'training': True, 'momentum': 0.5, 'eps': 0.6}), + ((3, 2, 4), {'training': False, 'momentum': -1.2}), + ((3, 1), {'training': True, 'momentum': 0.0}), + ((0,), {'training': True}), + ((0,), {'training': False}), + ((3, 2, 3, 4), {'training': True, 'momentum': -1.0, 'eps': 0.5}), + ((3, 2, 3, 4), {'training': False, 'momentum': -1.0, 'eps': 0.5}), + ((2, 1), {}), + ) + + for input_shape, kwargs in cases: + # args: running mean, running var, weight and bias should necessarily be of shape: (channels,) + channels = input_shape[1] if len(input_shape) > 1 else 0 + weight = make_arg(channels) if channels > 0 else None + bias = make_arg(channels) if channels > 0 else None + running_mean = make_arg_without_requires_grad(channels, low=0) + running_var = make_arg_without_requires_grad(channels, low=0) + + yield SampleInput( + make_arg(input_shape), + args=( + running_mean, + running_var, + weight, + bias + ), + kwargs=kwargs + ) + + # Checking for permutations of weights and biases as `None` + weights = [channels, None, None] + biases = [None, channels, None] + is_training = [True, False, False] + + for weight, bias, training in zip(weights, biases, is_training): + yield SampleInput( + make_arg(input_shape), + args=( + running_mean, + running_var, + make_arg(channels), + make_arg(channels) + ), + kwargs={'training': training} + ) + + # Test case for no optional kwargs + # running_mean and running_var are required in evaluation mode (training: False) but not in training mode + yield SampleInput(make_arg((1, 2, 3)), args=(None, None, None, None), kwargs={'training': True}) + +def sample_inputs_softmax_backward_data(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial( + 
        make_tensor, device=device, dtype=dtype, requires_grad=requires_grad
    )
    # Ordered as: input shape, softmax dim
    cases = [
        ((S,), 0),
        ((S, S), 0),
        ((S, M, S), -1),
    ]
    input_dtypes = [dtype]
    if dtype == torch.float and device == 'cuda':
        input_dtypes += [torch.float16]

    for (shape, dim), input_dtype in product(cases, input_dtypes):
        input = make_arg(shape)
        output = torch.nn.functional.softmax(input, dim=dim, dtype=input_dtype)
        yield SampleInput(make_arg(shape), output, dim, input_dtype)

def sample_inputs_native_batch_norm(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for torch.native_batch_norm, derived from the batch_norm samples.

    Reorders the (running_mean, running_var, weight, bias) args of the
    functional batch_norm samples into native_batch_norm's positional
    (weight, bias, running_mean, running_var, training, momentum, eps) order.
    """
    samples = sample_inputs_batch_norm(op_info, device, dtype, requires_grad, **kwargs)
    for sample in samples:
        # torch.native_batch_norm does not support 0 numel tensors
        # IndexError: Dimension out of range (expected to be in range of [-1, 0], but got 1)
        if sample.input.numel() == 0:
            continue
        args = sample.args
        training = sample.kwargs.get('training', True)
        momentum = sample.kwargs.get('momentum', 0.5)
        eps = sample.kwargs.get('eps', 1e-5)
        yield SampleInput(sample.input, args=(args[2], args[3], args[0], args[1], training, momentum, eps))


def sample_inputs__native_batch_norm_legit(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for torch._native_batch_norm_legit.

    Same reordering as sample_inputs_native_batch_norm; additionally exercises
    the overload without running stats when the batch_norm sample has
    running_mean/running_var set to None.
    """
    samples = sample_inputs_batch_norm(op_info, device, dtype, requires_grad, **kwargs)
    for sample in samples:
        # torch.native_batch_norm does not support 0 numel tensors
        # IndexError: Dimension out of range (expected to be in range of [-1, 0], but got 1)
        if sample.input.numel() == 0:
            continue
        args = sample.args
        training = sample.kwargs.get('training', True)
        momentum = sample.kwargs.get('momentum', 0.5)
        eps = sample.kwargs.get('eps', 1e-5)
        if args[0] is not None and args[1] is not None:
            yield SampleInput(sample.input, args=(args[2], args[3], args[0], args[1], training, momentum, eps))
        else:
            # no-stats overload: (weight, bias, training, momentum, eps)
            yield SampleInput(sample.input, args=(args[2], args[3], training, momentum, eps))

def sample_inputs__batch_norm_with_update(op_info, device, dtype,
                                          requires_grad, **kwargs):
    """Sample inputs for torch._batch_norm_with_update.

    Only forwards samples where all of weight/bias/running_mean/running_var
    are present; the op has no `training` flag (it always updates stats).
    """
    samples = sample_inputs_batch_norm(op_info, device, dtype, requires_grad, **kwargs)
    for sample in samples:
        # torch.native_batch_norm does not support 0 numel tensors
        # IndexError: Dimension out of range (expected to be in range of [-1, 0], but got 1)
        if sample.input.numel() == 0:
            continue
        args = sample.args
        momentum = sample.kwargs.get('momentum', 0.5)
        eps = sample.kwargs.get('eps', 1e-5)
        if any(args[i] is None for i in range(4)):
            continue
        yield SampleInput(sample.input, args=(args[2], args[3], args[0], args[1], momentum, eps))

def sample_inputs_nn_activation_relu(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for torch.nn.functional.relu: scalar, 1D, 2D and 3D tensors."""
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    cases = (
        (()),
        ((S, )),
        ((S, S)),
        ((S, M, S))
    )

    for shape in cases:
        yield SampleInput(make_arg(shape))

def sample_inputs_prelu(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for torch.nn.functional.prelu.

    Covers scalar weights of several signs, per-channel (1D) weights, and the
    unary-elementwise samples with the op's default kwargs.
    """
    op_kwargs = op_info.sample_kwargs(device, dtype, None)[0]
    yield from sample_inputs_elementwise_unary(op_info, device, dtype, requires_grad,
                                               op_kwargs=op_kwargs)

    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    cases = (
        (()),
        ((S, )),
        ((S, S)),
        ((S, M, S))
    )

    for shape in cases:
        for weight in [-1., 0., 0.8, 1.]:
            weight_tensor = torch.tensor(weight, device=device, dtype=dtype, requires_grad=requires_grad)
            yield SampleInput(make_arg(shape), args=(weight_tensor,))

        # channel dim is dim 1 for >=2D inputs; otherwise a single weight
        channel_size = shape[1] if len(shape) >= 2 else 1
        yield SampleInput(make_arg(shape), args=(make_arg((channel_size,)),))

    weight_tensor = torch.tensor(1., device=device, dtype=dtype, requires_grad=requires_grad)

    yield SampleInput(make_arg((S, S)), kwargs=dict(weight=weight_tensor,))
    yield SampleInput(make_arg((S, S)), kwargs=dict(weight=make_arg((S,)),))

def reference_inputs_prelu(op, device, dtype, requires_grad, **kwargs):
    """Reference inputs for prelu: the sample inputs plus the unary reference set."""
    yield from sample_inputs_prelu(op, device, dtype, requires_grad, **kwargs)
    yield from reference_inputs_elementwise_unary(op, device, dtype, requires_grad, **kwargs)

def sample_kwargs_prelu_scalar_weight(device, dtype, input):
    """Return matching (torch, numpy) kwargs with a random scalar prelu weight."""
    weight = torch.rand((), device=device, dtype=dtype)
    # NumPy does not support bfloat16, so we default to float32 (only for NumPy) in that case
    if dtype == torch.bfloat16:
        weight_cpu = weight.to(dtype=torch.float32, device="cpu")
    else:
        weight_cpu = weight.cpu()
    np_weight = weight_cpu.numpy()
    return ({'weight': weight}, {'weight': np_weight})

def error_inputs_prelu(op, device):
    """Error inputs for prelu: invalid weight shapes and zero-dim inputs."""
    # Weight has numel != 1, but self.ndim is zero-dim tensor
    inp = make_tensor((), device=device, dtype=torch.float32)
    weight = make_tensor((2,), device=device, dtype=torch.float32)
    yield ErrorInput(SampleInput(inp, kwargs={'weight': weight}),
                     error_regex="Not allow zero-dim input tensor.")

    # Weight has numel != 1, but numel does not match channel size
    inp = make_tensor((2, 8, 3), device=device, dtype=torch.float32)
    weight = make_tensor((9,), device=device, dtype=torch.float32)
    yield ErrorInput(SampleInput(inp, kwargs={'weight': weight}),
                     error_regex="Mismatch of parameter numbers and input channel size.")

    # Weight is neither a scalar nor 1-D tensor
    inp = make_tensor((2, 8, 3), device=device, dtype=torch.float32)
    weight = make_tensor((2, 4), device=device, dtype=torch.float32)
    yield ErrorInput(SampleInput(inp, kwargs={'weight': weight}),
                     error_regex="prelu: Expected `weight` to be a scalar or 1D tensor, but got: ndim = 2")

    # src and index tensors must have the same # of dimensions
def sample_inputs_norm(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for torch.norm with a spread of vector/matrix orders and dims."""
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    # ord = inf is tested in inputs_norm_inf as it fails on some tests
    cases = [
        ((S, S), (2,), '2'),
        ((S, S), (0,), '0'),
        ((S, S), (0.5,), '0_5'),
        ((S, S), (1,), '1'),
        ((S, S), (3,), '3'),
        ((S, S), (-1,), 'neg_1'),
        ((S, S), (-2,), 'neg_2'),
        ((S, S), (-0.5,), 'neg_0_5'),
        ((S, S), (-1.5,), 'neg_1_5'),
    ]

    # fractional orders need nonzero entries to stay differentiable
    cases_nonzero_input = (
        ((S, S, S), (1.5,), '1_5_default'),
        ((S, S, S), (1.5, 1), '1_5_dim'),
        ((S, S, S), (1.5, -1), '1_5_neg_dim'),
        ((S, S, S), (1.5, 1, True), 'keepdim_1_5_dim'),
        ((S, S, S), (1.5, -1, True), 'keepdim_1_5_neg_dim'),
    )

    cases_posdim = (
        ((S, S), (-2, 1,), 'neg_2_dim'),
        ((S, S), (-1, 1,), 'neg_1_dim'),
        ((S, S), (0, 1,), '0_dim'),
        ((S, S), (1, 1,), '1_dim'),
        ((S, S), (2, 1,), '2_dim'),
        ((S, S), (3, 1,), '3_dim'),
        ((S, S, S), (2, 1), '2_dim'),
        ((S, S, S), (3, 1), '3_dim'),
        ((S, S, S), (2, 1, True), 'keepdim_2_dim'),
        ((S, S, S), (3, 1, True), 'keepdim_3_dim'),
        ((), (2, 0), '2_dim_scalar'),
        ((), (3, 0), '3_dim_scalar'),
        ((), (2, 0, True), 'keepdim_2_dim_scalar'),
        ((), (3, 0, True), 'keepdim_3_dim_scalar'),
    )

    # mirror each positive-dim case with a negated dim argument
    cases_negdim = ((shape, args[:1] + (-args[1],) + args[2:], name.replace("_dim", "_neg_dim"))
                    for shape, args, name in cases_posdim)

    for shape, args, name in itertools.chain(cases, cases_posdim, cases_negdim):
        yield SampleInput(make_arg(shape), args=args, name=name)

    for shape, args, name in cases_nonzero_input:
        yield SampleInput(make_arg(shape, exclude_zero=True), args=args, name=name)


def sample_inputs_norm_fro(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for torch.norm with the Frobenius order."""
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    cases = (
        ((S, S), (), 'default'),
        ((S, S), ('fro',), 'fro_default'),
        ((S, S), ('fro', [0, 1],), 'fro'),
    )

    for shape, args, name in cases:
        yield SampleInput(make_arg(shape), args=args, name=name)


def sample_inputs_norm_nuc(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for torch.norm with the nuclear order, plain and batched."""
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    cases = (
        ((S, S), ('nuc',), 'nuc'),
        ((S, S, S), ('nuc', [1, 2]), 'nuc_batched'),
    )

    for shape, args, name in cases:
        yield SampleInput(make_arg(shape), args=args, name=name)


def sample_inputs_norm_inf(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for torch.norm with +/-inf orders (split out; see sample_inputs_norm)."""
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    cases = (
        ((S, S), (-inf,), '-inf'),
        ((S, S), (inf,), 'inf'),
        ((S, S), (inf, 1,), 'inf_2_dim'),
        ((S, S), (inf, -1,), 'inf_2_neg_dim'),
    )

    for shape, args, name in cases:
        yield SampleInput(make_arg(shape), args=args, name=name)


def sample_inputs_equal(op, device, dtype, requires_grad, **kwargs):
    """Sample inputs for torch.equal, including broadcasting and identical-copy pairs."""
    make_arg = partial(
        make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    shapes = (
        ((), ()),
        ((S,), ()),
        ((), (S,)),
        ((S, 1), (S,)),
        ((M, S), ()),
        ((S, S), (S, S))
    )

    for shape_lhs, shape_rhs in shapes:
        lhs = make_arg(shape_lhs)
        rhs = make_arg(shape_rhs)
        broadcasts_input = shape_lhs != torch.broadcast_shapes(shape_lhs, shape_rhs)

        yield SampleInput(lhs, args=(rhs,), broadcasts_input=broadcasts_input)
        if shape_lhs == shape_rhs:
            # equal-shaped pair with identical values
            yield SampleInput(lhs, args=(lhs.clone().detach_(),))


def sample_inputs_jiterator(op, device, dtype, requires_grad, **kwargs):
    """Sample inputs for jiterator-generated ops with a configurable operand count."""
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    shapes = (
        ((), ()),
        ((S,), ()),
        ((S, 1), (S,)),
        ((M, S), ()),
        ((S, M, S), (M, S)),
        ((S, M, S), (S, M, S)),
        ((M, 1, S), (M, S)),
        ((M, 1, S), (1, M, S)),
        ((0, 1, 3), (0, 10, 3))
    )

    num_inputs = kwargs.get('num_inputs')
    sample_kwargs = kwargs.get('sample_kwargs', {})

    for shape_lhs, shape_rhs in shapes:
        lhs = make_arg(shape_lhs)

        args = []
        for i in range(num_inputs - 1):
            args.append(make_arg(shape_rhs))
        broadcasts_input = (shape_lhs != torch.broadcast_shapes(shape_lhs, shape_rhs))

        yield SampleInput(lhs, args=tuple(args), kwargs=sample_kwargs, broadcasts_input=broadcasts_input)

def sample_inputs_broadcast_shapes(op, device, dtype, requires_grad, **kwargs):
    """Sample inputs for torch.broadcast_shapes; inputs are shapes, not tensors."""
    shapes = (
        ((), ()),
        ((S,), ()),
        ((S, 1), (S,)),
        ((S, 1), S),
        ((M, S), ()),
        ((S, M, S), (M, S)),
        ((S, M, S), (S, M, S)),
        ((M, 1, S), (M, S)),
        ((M, 1, S), (1, M, S)),
        ((0, 1, 3), (0, 10, 3))
    )

    for shape in shapes:
        inp, *arg0 = shape
        yield SampleInput(inp, args=tuple(arg0))

def sample_inputs_add_sub(op, device, dtype, requires_grad, **kwargs):
    """Sample inputs for add/sub: the generic binary samples plus `alpha` kwarg cases."""
    yield from sample_inputs_elementwise_binary(op, device, dtype, requires_grad, **kwargs)

    # Adds alpha kwarg cases
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    lhs = make_arg((S, S), **op.lhs_make_tensor_kwargs)
    rhs = make_arg((S, S), **op.rhs_make_tensor_kwargs)
    if dtype is not torch.bool:
        yield SampleInput(lhs, args=(rhs,), kwargs={'alpha': 2})
    else:
        yield SampleInput(lhs, args=(rhs,), kwargs={'alpha': True})
    neg_alpha = -3.125 if (dtype.is_floating_point or dtype.is_complex) else -3
    lhs = make_arg((S, S), **op.lhs_make_tensor_kwargs)
    rhs = make_arg((S, S), **op.rhs_make_tensor_kwargs)
    if dtype is not torch.bool:
        yield SampleInput(lhs, args=(rhs,), kwargs={'alpha': neg_alpha})
    else:
        yield SampleInput(lhs, args=(rhs,), kwargs={'alpha': False})

def error_inputs_arange(op, device, **kwargs):
    """Error inputs for torch.arange: zero step, sign mismatches and infinite bounds."""
    yield ErrorInput(SampleInput(0, args=(3, 0)), error_type=RuntimeError, error_regex='step must be nonzer')
    yield ErrorInput(SampleInput(0, args=(-3, 2)), error_type=RuntimeError, error_regex='bound inconsistent with step sign')
    yield ErrorInput(SampleInput(0, args=(3, -2)), error_type=RuntimeError, error_regex='bound inconsistent with step sign')
    yield ErrorInput(SampleInput(0, args=(float('inf'), 2)), error_type=RuntimeError, error_regex='unsupported range')
    yield ErrorInput(SampleInput(float('-inf'), args=(1, 2)), error_type=RuntimeError, error_regex='unsupported range')

def sample_inputs_arange(op, device, dtype, requires_grad, **kwargs):
    """Sample inputs for torch.arange over int/float ranges, defaults, and a large span."""
    int_samples = (
        # positive direction
        (-1, 2, 2),
        # negative direction
        (2, -3, -1),
        # start == end
        (1, 1, 1),
        (1, 1, -1),
        # divides evenly
        (0, -8, -4),
        (1, 5, 2),
        # bool
        (False, True, True),
        # default step
        (0, 1, None),
        # default start
        (None, 3, None),
    )

    def to_float(start, end, step):
        # nudge the int samples off integer boundaries
        start = start + 0.1 if start is not None else None
        end = end + 0.1
        step = float(step) if step is not None else None
        return start, end, step

    float_samples = (
        # includes endpoint
        (0., -8. - 1e-6, -4.),
        (1., 5. + 1e-6, 2.),
        (0., -8., -4.),
        (1., 5., 2.),
        *(to_float(start, end, step) for (start, end, step) in int_samples),
    )

    large_samples = (
        (0, 10000, None),
    )

    samples = int_samples + float_samples
    if dtype not in (torch.int8, torch.uint8):
        samples += large_samples

    for start, end, step in samples:
        if start is None:
            assert step is None
            # Pass end as positional arg
            yield SampleInput(end, kwargs={"dtype": dtype, "device": device})
            # (Similar to) calling torch.arange(end=3)
            yield SampleInput(0, kwargs={"end": end, "dtype": dtype, "device": device})
        elif step is None:
            yield SampleInput(start, args=(end,), kwargs={"dtype": dtype, "device": device})
        else:
            yield SampleInput(start, args=(end, step), kwargs={"dtype": dtype, "device": device})

    yield SampleInput(2)
    yield SampleInput(1, args=(3, 1))

def sample_inputs_randn(op, device, dtype, requires_grad, **kwargs):
    """Sample inputs for torch.randn; the 'input' is a shape, not a tensor."""
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=False)

    shapes = (
        (M,),
        (S, S)
    )

    for shape in shapes:
        yield SampleInput(input=shape, kwargs=dict(dtype=dtype, device=device, requires_grad=requires_grad))

def sample_inputs_normal(op, device, dtype, requires_grad, **kwargs):
    """Sample inputs for Tensor.normal_(mean, std)."""

    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=False)
    samples = (
        ((S, S), 0, 5),
        ((S, S, S), -2, 0.5),
    )
    for shape, mean, std in samples:
        yield SampleInput(make_arg(shape), args=(mean, std))

def error_inputs_normal(op, device, **kwargs):
    """Error inputs for normal_: negative std."""
    t = torch.zeros([10], device=device)
    invalid_std = -1
    yield ErrorInput(
        SampleInput(t, args=(0, invalid_std)),
        error_type=RuntimeError,
        error_regex=fr"normal expects std >= 0.0, but found std {invalid_std}",
    )

def sample_inputs_cauchy(op, device, dtype, requires_grad, **kwargs):
    """Sample inputs for Tensor.cauchy_(median, gamma)."""
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=False)
    samples = (
        ((M,), 0, 0.5),
        ((S, S), 0, 1),
        ((S, S, S), -2, 1),
    )
    for shape, median, gamma in samples:
        yield SampleInput(make_arg(shape), args=(median, gamma))


def error_inputs_cauchy(op, device, **kwargs):
    """Error inputs for cauchy_: non-positive scale."""
    t = torch.zeros([10], device=device)
    invalid_scale = 0
    yield ErrorInput(
        SampleInput(t, args=(0, invalid_scale,)),
        error_type=RuntimeError,
        error_regex=fr"cauchy_ expects sigma > 0.0, but found sigma={invalid_scale}",
    )


def sample_inputs_exponential(op, device, dtype, requires_grad, **kwargs):
    """Sample inputs for Tensor.exponential_(rate)."""

    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=False)
    samples = (
        ((M,), 0.5),
        ((S, S), 1),
        ((S, S, S), 1.5),
    )
    for shape, rate in samples:
        yield SampleInput(make_arg(shape), args=(rate,))


def error_inputs_exponential(op, device, **kwargs):
    """Error inputs for exponential_: non-positive rate."""
    t = torch.zeros([10], device=device)
    invalid_rate = 0
    yield ErrorInput(
        SampleInput(t, args=(invalid_rate,)),
        error_type=RuntimeError,
        error_regex=fr"exponential_ expects lambda > 0.0, but found lambda={invalid_rate}",
    )


def sample_inputs_geometric(op, device, dtype, requires_grad, **kwargs):
    """Sample inputs for Tensor.geometric_(p)."""

    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=False)
    samples = (
        ((M,), 0.2),
        ((S, S), 0.5),
        ((S, S, S), 0.8),
    )
    for shape, rate in samples:
        yield SampleInput(make_arg(shape), args=(rate,))


def error_inputs_geometric(op, device, **kwargs):
    """Error inputs for geometric_: probability outside (0, 1)."""
    t = torch.zeros([10], device=device)
    neg_prob = -1
    yield ErrorInput(
        SampleInput(t, args=(neg_prob,)),
        error_type=RuntimeError,
        error_regex=fr"geometric_ expects p to be in \(0, 1\), but got p={neg_prob}",
    )


def sample_inputs_log_normal(op, device, dtype, requires_grad, **kwargs):
    """Sample inputs for Tensor.log_normal_(mean, std)."""

    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=False)
    samples = (
        ((M,), 0, 0.25),
        ((S, S), 0.5, 1),
        ((S, S, S), 0, 0.5),
    )
    for shape, mean, std in samples:
        yield SampleInput(make_arg(shape), args=(mean, std))


def error_inputs_log_normal(op, device, **kwargs):
    """Error inputs for log_normal_: non-positive std."""
    t = torch.zeros([10], device=device)
    invalid_std = 0
    yield ErrorInput(
        SampleInput(t, args=(0, invalid_std)),
        error_type=RuntimeError,
        error_regex=fr"log_normal_ expects std > 0.0, but found std={invalid_std}",
    )


def sample_inputs_uniform(op, device, dtype, requires_grad, **kwargs):
    """Sample inputs for Tensor.uniform_(from_, to)."""

    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=False)
    samples = (
        ((M,), -100, 100),
        ((S, S), 0, 1),
        ((S, S, S), 1, 2),
    )
    # NOTE(review): the names `hi, lo` are swapped relative to the values they
    # unpack (e.g. hi=-100, lo=100); the args order (from_, to) is still
    # correct, so behavior is unaffected — consider renaming to `lo, hi`.
    for shape, hi, lo in samples:
        yield SampleInput(make_arg(shape), args=(hi, lo))

def sample_inputs_ones_zeros(op, device, dtype, requires_grad, **kwargs):
    """Sample inputs for torch.ones/torch.zeros; the 'input' is a size tuple."""
    # this is a bit messy, as we want the args to be tuples
    # so if we pass size as a tuple, we have a tuple containing a tuple
    sizes = (
        (M,),
        (S, S),
    )
    for size in sizes:
        yield SampleInput(size, kwargs={'dtype': dtype, 'device': device})

def sample_inputs_full(op, device, dtype, requires_grad, **kwargs):
    """Sample inputs for torch.full with fill values of this dtype and of int."""
    def get_val(dtype):
        # a representative scalar of the requested dtype
        return make_tensor([], dtype=dtype, device="cpu").item()

    sizes = (
        (M,),
        (S, S),
    )
    fill_values = [get_val(dtype), get_val(torch.int)]

    for size, fill_value in product(sizes, fill_values):
        yield SampleInput(size, fill_value, dtype=dtype, device=device)


def error_inputs_uniform(op, device, **kwargs):
    """Error inputs for uniform_: from > to."""
    t = torch.zeros([10], device=device)
    yield ErrorInput(
        SampleInput(t, args=(3, -1)),
        error_type=RuntimeError,
        error_regex=r"uniform_ expects to return a \[from, to\) range, but found from=3 > to=-1",
    )


def error_inputs_linspace(op, device, **kwargs):
    """Error inputs for torch.linspace: negative steps, bad arg types, non-scalar tensors."""
    yield ErrorInput(SampleInput(0, args=(3, -1)), error_type=RuntimeError, error_regex='number of steps must be non-negative')
    yield ErrorInput(
        SampleInput(0, args=(3, 1.)),
        error_type=TypeError,
        error_regex="received an invalid combination of arguments - got \\(int, int, float",
    )
    yield ErrorInput(
        SampleInput(torch.tensor([1, 1], device=device), args=(torch.tensor([3, 3], device=device), 1)),
        error_type=RuntimeError,
        error_regex="only supports 0-dimensional start and end tensors"
    )


def sample_inputs_linspace(op, device, dtype, requires_grad, **kwargs):
    """Sample inputs for torch.linspace over a grid of starts/ends/step counts."""
    ends = (-3, 0, 1, 4, 50)
    starts = (-2., 0, 4.3, 50)
    nsteps = (0, 1, 50)
    # Extra case to replicate off-by-one issue on CUDA
    cases = list(product(starts, ends, nsteps)) + [(0, 7, 50)]
    for start, end, nstep in cases:
        # uint8 cannot represent negative endpoints
        if dtype == torch.uint8 and (end < 0 or start < 0):
            continue
        yield SampleInput(start, args=(end, nstep), kwargs={"dtype": dtype, "device": device})

    yield SampleInput(1, args=(3, 1))


def sample_inputs_linspace_tensor_overload(op, device, dtype, requires_grad, **kwargs):
    """Sample inputs for torch.linspace with tensor start and/or end arguments."""
    ends = (-3, 0, 1, 4, 50)
    starts = (-2., 0, 4.3, 50)
    nsteps = (0, 1, 50)
    is_start_end_tensors = ((True, True), (True, False), (False, True))
    make_arg = partial(torch.tensor, device=device, requires_grad=False)

    # Extra case to replicate off-by-one issue on CUDA
    cases = list(product(starts, ends, nsteps, is_start_end_tensors)) + [(0, 7, 50, (True, True))]
    for start, end, nstep, (is_start_tensor, is_end_tensor) in cases:
        # uint8 cannot represent negative endpoints
        if dtype == torch.uint8 and (end < 0 or start < 0):
            continue

        tensor_options = {"dtype": dtype, "device": device}
        if is_start_tensor:
            start = make_arg(start, dtype=torch.float32 if isinstance(start, float) else torch.int64)
        if is_end_tensor:
            end = make_arg(end, dtype=torch.float32 if isinstance(end, float) else torch.int64)

        yield SampleInput(start, args=(end, nstep), kwargs=tensor_options)

    yield SampleInput(1, args=(3, 1))


def
sample_inputs_logspace(op, device, dtype, requires_grad, **kwargs): + ends = (-3, 0, 1.2, 2, 4) + starts = (-2., 0, 1, 2, 4.3) + nsteps = (0, 1, 2, 4) + bases = (2., 1.1) if dtype in (torch.int8, torch.uint8) else (None, 2., 3., 1.1, 5.) + for start, end, nstep, base in product(starts, ends, nsteps, bases): + if dtype == torch.uint8 and end < 0 or start < 0: + continue + if nstep == 1 and isinstance(start, float) and not (dtype.is_complex or dtype.is_floating_point): + # https://github.com/pytorch/pytorch/issues/82242 + continue + if base is None: + yield SampleInput(start, args=(end, nstep), kwargs={"dtype": dtype, "device": device}) + else: + yield SampleInput(start, args=(end, nstep, base), kwargs={"dtype": dtype, "device": device}) + + yield SampleInput(1, args=(3, 1, 2.)) + + +def sample_inputs_logspace_tensor_overload(op, device, dtype, requires_grad, **kwargs): + ends = (-3, 0, 1.2, 2, 4) + starts = (-2., 0, 1, 2, 4.3) + nsteps = (0, 1, 2, 4) + bases = (2., 1.1) if dtype in (torch.int8, torch.uint8) else (None, 2., 3., 1.1, 5.) 
+ is_start_end_tensors = ((True, True), (True, False), (False, True)) + make_arg = partial(torch.tensor, device=device) + for start, end, nstep, base, (is_start_tensor, is_end_tensor) in product(starts, ends, nsteps, bases, is_start_end_tensors): + if dtype == torch.uint8 and end < 0 or start < 0: + continue + if nstep == 1 and isinstance(start, float) and not (dtype.is_complex or dtype.is_floating_point): + # https://github.com/pytorch/pytorch/issues/82242 + continue + + tensor_options = {"dtype": dtype, "device": device} + + if (is_start_tensor): + start = make_arg(start, dtype=torch.float32 if isinstance(start, float) else torch.int64) + if (is_end_tensor): + end = make_arg(end, dtype=torch.float32 if isinstance(end, float) else torch.int64) + + if base is None: + yield SampleInput(start, args=(end, nstep), kwargs=tensor_options) + else: + yield SampleInput(start, args=(end, nstep, base), kwargs=tensor_options) + + yield SampleInput(1, args=(3, 1, 2.)) + + +def sample_inputs_isclose(op, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_elementwise_binary(op, device, dtype, requires_grad, **kwargs) + + # Creates additional inputs to test the rtol, atol, and equal_nan params + rtols = [0., 1e-7] + atols = [0., 1e-7] + equal_nans = [False, True] + + products = product(rtols, atols, equal_nans) + + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + for rtol, atol, equal_nan in products: + lhs = make_arg((S, S), **op.lhs_make_tensor_kwargs) + rhs = make_arg((S, S), **op.rhs_make_tensor_kwargs) + + yield SampleInput(lhs, args=(rhs,), + kwargs=dict(rtol=rtol, atol=atol, equal_nan=equal_nan)) + + +def error_inputs_isclose(op, device, **kwargs): + make_float_arg = partial(make_tensor, device=device, dtype=torch.float, requires_grad=False) + + yield ErrorInput(SampleInput(make_float_arg(()), args=(make_float_arg(()),), kwargs={'rtol': -0.4}), + error_type=RuntimeError, + error_regex='rtol must be greater than or 
equal to zero') + + yield ErrorInput(SampleInput(make_float_arg(()), args=(make_float_arg(()),), kwargs={'atol': -0.4}), + error_type=RuntimeError, + error_regex='atol must be greater than or equal to zero') + + +def sample_inputs_t(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + yield SampleInput(make_arg((1, 2))) + yield SampleInput(make_arg((2,))) + yield SampleInput(make_arg(())) + + +def sample_inputs_mm(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + def make_arg_conj(size): + return make_arg(size).conj().requires_grad_(requires_grad) + + first_shape, second_shape = (S, M), (M, S) + + yield SampleInput(make_arg(first_shape), args=(make_arg(second_shape),)) + + if dtype.is_complex: + yield SampleInput(make_arg(first_shape), args=(make_arg_conj(second_shape),)) + + # Matmul of empty matrices + yield SampleInput(make_arg((0, S)), args=(make_arg(S, M),)) + yield SampleInput(make_arg((S, 0)), args=(make_arg(0, M),)) + + +def sample_inputs_addmm(op_info, device, dtype, requires_grad, **kwargs): + alpha_val = kwargs.get('alpha', 2 + 3j if dtype.is_complex else 0.6) + beta_val = kwargs.get('beta', 1 + 2j if dtype.is_complex else 0.2) + tests_list = [ + ((2, 3), (2, 2), (2, 3), False), + ((3, 3), (3, 3), (3, 3), False), + ] + tests_with_lhs_broadcasting = [ + ((1,), (2, 2), (2, 3), True), + ((), (2, 2), (2, 3), True), + ] + test_cases = tests_list + tests_with_lhs_broadcasting # type: ignore[operator] + + kwargs = dict(alpha=alpha_val, beta=beta_val) + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + for shape_a, shape_b, shape_c, broadcasts_input in test_cases: + yield SampleInput( + make_arg(shape_a), + make_arg(shape_b), + make_arg(shape_c), + **kwargs, + ).with_metadata(broadcasts_input=broadcasts_input) + + if dtype.is_complex: + 
shape = (3, 3) + yield SampleInput( + make_arg(shape), + make_arg(shape, requires_grad=False).mH.requires_grad_(requires_grad), + make_arg(shape), + **kwargs, + ) + yield SampleInput( + make_arg(shape), + make_arg(shape), + make_arg(shape, requires_grad=False).mH.requires_grad_(requires_grad), + **kwargs, + ) + # addmm of empty matrices + if dtype.is_floating_point: + yield SampleInput(make_arg(S, M), make_arg(S, 0), make_arg(0, M), **kwargs) + # empty matmul with broadcastable input + yield SampleInput(make_arg(M), make_arg(S, 0), make_arg(0, M), **kwargs).with_metadata(broadcasts_input=True) + +def sample_inputs_sparse_sampled_addmm(op_info, device, dtype, requires_grad, **kwargs): + alpha = 2 + 3j if dtype.is_complex else 0.6 + beta = 1 + 2j if dtype.is_complex else 0.2 + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # sparse.sampled_addmm performs: alpha * (A @ B) * sparse_ones_like(C) + beta * C + for m, n, k in itertools.product([0, 5], repeat=3): + yield SampleInput( + torch.eye(m, n, device=device, dtype=dtype) + .to_sparse_csr() + .requires_grad_(requires_grad), + make_arg((m, k)), + make_arg((k, n)), + alpha=alpha, + beta=beta, + ) + +def sample_inputs_sparse_mm_reduce(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + reductions = ["sum", "mean", "amax", "amin"] + for m, k, reduce in product([5, 7], [3, 11], reductions): + yield SampleInput( + torch.eye(m, m) + .to(device=device, dtype=dtype) + .to_sparse_csr() + .requires_grad_(requires_grad), + make_arg((m, k)), + reduce, + ) + + +def sample_inputs_mv(self, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad) + yield SampleInput(make_arg(S, M), make_arg(M)) + +def sample_inputs_bmm(self, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, 
                       dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad)
    yield SampleInput(make_arg(M, S, M), make_arg(M, M, S))

def sample_inputs_dot_vdot(self, device, dtype, requires_grad, **kwargs):
    """Sample inputs for torch.dot/torch.vdot, including a conjugated rhs for complex."""
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    def make_arg_conj(size):
        return make_arg(size).conj().requires_grad_(requires_grad)

    yield SampleInput(make_arg((S, )), make_arg((S, )))
    if dtype.is_complex:
        # dot/vdot for (conj(input), conj(arg_tensor)) and (conj(input), arg_tensor)
        # is tested in test_conj_view (which tests operations with only conjugated input tensor
        # -- not conjugated arg tensors)
        yield SampleInput(make_arg((S, )), make_arg_conj((S, )))


def error_inputs_dot_vdot(op_info, device, is_ref=False, **kwargs):
    """Error inputs for dot/vdot: dtype mismatch, non-1D operands, size mismatch, device mismatch."""
    make_input = partial(make_tensor, device=device, dtype=torch.float32)

    if not is_ref:
        yield ErrorInput(SampleInput(make_input(1), args=(make_input(3, dtype=torch.float16),)),
                         error_regex='dot : expected both vectors to have same dtype')
    yield ErrorInput(SampleInput(make_input(1, 1), args=(make_input(3),)),
                     error_regex='1D tensors expected')
    yield ErrorInput(SampleInput(make_input(9), args=(make_input(3),)),
                     error_regex='inconsistent tensor size')
    if device != "cpu" and not is_ref:
        yield ErrorInput(SampleInput(make_input(3), args=(make_input(3, device="cpu"),)),
                         error_regex='Expected all tensors to be on the same device')


def sample_inputs_addmv(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for torch.addmv with alpha/beta and broadcasting self."""
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)

    test_cases = (((S,), (S, M), (M,), 1, 1, False),
                  ((S,), (S, M), (M,), 0.2, 0.6, False),
                  )

    test_cases_with_broadcast = (((1,), (S, M), (M,), 1, 1, True),
                                 ((1,), (S, M), (M,), 0.2, 0.6, True),
                                 ((), (S, M), (M,), 1, 1, True),
                                 ((), (S, M), (M,), 0.2, 0.6, True),
                                 )

    cases = test_cases + test_cases_with_broadcast

    # addmv performs: beta * M + alpha * (mat @ vec)
    for size, mat, vec, beta, alpha, broadcasts_input in cases:
        yield SampleInput(make_arg(size), args=(make_arg(mat), make_arg(vec)),
                          kwargs=dict(beta=beta, alpha=alpha), broadcasts_input=broadcasts_input)

def sample_inputs_addbmm(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for torch.addbmm with alpha/beta (real and complex) and broadcasting."""
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    # input_shape, batch1_shape, batch2_shape, beta_val, alpha_val, is_broadcasting
    test_cases = [((S, M), (S, S, S), (S, S, M), 1, 1, False),
                  ((1,), (S, S, S), (S, S, M), 1, 1, True),
                  ((S, M), (S, S, S), (S, S, M), 0.6, 0.2, False),
                  ((1,), (S, S, S), (S, S, M), 0.6, 0.2, True),
                  ((), (S, S, S), (S, S, M), 1, 1, True),
                  ((), (S, S, S), (S, S, M), 0.6, 0.2, True),
                  ]

    for input_shape, batch1_shape, batch2_shape, beta, alpha, is_broadcasting in test_cases:
        if dtype.is_complex:
            beta_complex, alpha_complex = beta * (1 + 2j), alpha * (2 + 3j)
            yield SampleInput(make_arg(input_shape), args=(make_arg(batch1_shape), make_arg(batch2_shape)),
                              kwargs=dict(beta=beta_complex, alpha=alpha_complex), broadcasts_input=is_broadcasting)
        yield SampleInput(make_arg(input_shape), args=(make_arg(batch1_shape), make_arg(batch2_shape)),
                          kwargs=dict(beta=beta, alpha=alpha), broadcasts_input=is_broadcasting)

def sample_inputs_addcmul_addcdiv(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for torch.addcmul/addcdiv, with and without the `value` kwarg."""
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    test_cases = [(((S, S), (S, S), (S, S)), False),
                  (((S, S), (S, 1), (1, S)), False),
                  (((1,), (S, S, 1), (1, S)), True),
                  (((), (), ()), False),
                  (((S, S), (), ()), True),
                  (((), (S, S, 1), (1, S)), True)
                  ]

    for input_args, broadcasts_input in test_cases:
        # addcdiv should accept inputs with zero value
        # Currently, it throws ZeroDivisionError when the denominator is zero
        # TODO: exclude_zeros can be removed after https://github.com/pytorch/pytorch/issues/73638 is fixed
        args = tuple(make_arg(arg, exclude_zero=True) if isinstance(arg, tuple) else arg
                     for arg in input_args)
        yield SampleInput(*args).with_metadata(broadcasts_input=broadcasts_input)

        # addcdiv should accept inputs with zero value
        # Currently, it throws ZeroDivisionError when the denominator is zero
        # TODO: exclude_zeros can be removed after https://github.com/pytorch/pytorch/issues/73638 is fixed
        args = tuple(make_arg(arg, exclude_zero=True) if isinstance(arg, tuple) else arg
                     for arg in input_args)
        yield SampleInput(
            *args, value=3.14 if dtype.is_floating_point or dtype.is_complex else 3
        ).with_metadata(broadcasts_input=broadcasts_input)

def reference_inputs_addcmul_addcdiv(op_info, device, dtype, requires_grad, **kwargs):
    """Reference inputs for addcmul/addcdiv: sample inputs plus type-promotion cases."""
    yield from sample_inputs_addcmul_addcdiv(
        op_info, device, dtype, requires_grad, **kwargs)

    # type promotion cases
    supported_dtypes = op_info.supported_dtypes(device)
    make_arg = partial(make_tensor, device=device, requires_grad=requires_grad)

    types = (
        (torch.float64, torch.complex128),
        (torch.bfloat16, torch.float32),
    )

    values = (
        None,
        True, False,
        3.14, 3,
        1.0, 1,
        0.0, 0,
        -3.14, -3,
        3.14 + 2.71j,
    )

    for (type2, type3), value in product(types, values):
        if (type2 not in supported_dtypes or
                type3 not in supported_dtypes):
            continue

        # RuntimeError: value cannot be converted without overflow
        if (type(value) is complex and
                type2 is not torch.complex128):
            continue

        arg1 = make_arg([5, 5], dtype=dtype)
        arg2 = make_arg([5, 5], dtype=type2)
        arg3 = make_arg([1, 5], dtype=type3)

        # TypeError: addcdiv(): argument 'value' must be Number, not NoneType
        if value is not None:
            yield SampleInput(arg1, args=(arg2, arg3), kwargs=dict(value=value))
        else:
            yield SampleInput(arg1, args=(arg2, arg3))

def sample_inputs_baddbmm(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for torch.baddbmm with alpha/beta, broadcasting and conj/transposed batches."""
    test_cases = [((S, S, M), (S, S, S), (S, S, M), 1, 1, False),
                  ((1,), (S, S, S), (S, S, M), 1, 1, True),
                  ((S, S, M), (S, S, S), (S, S, M), 0.6, 0.2, False),
                  ((1,), (S, S, S), (S, S, M), 0.6, 0.2, True),
                  ((), (S, S, S), (S, S, M), 1, 1, True),
                  ((), (S, S, S), (S, S, M), 0.6, 0.2, True),
                  ]
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, low=None, high=None)
    for (input_shape, batch1_shape, batch2_shape, alpha, beta, broadcasts_input) in test_cases:
        yield SampleInput(
            make_arg(input_shape),
            make_arg(batch1_shape),
            make_arg(batch2_shape),
            beta=beta,
            alpha=alpha
        ).with_metadata(broadcasts_input=broadcasts_input)

        if dtype.is_complex:
            yield SampleInput(
                make_arg(input_shape),
                make_arg(batch1_shape),
                make_arg(batch2_shape),
                beta=beta * (1 + 2j),
                alpha=alpha * (2 + 3j),
            ).with_metadata(broadcasts_input=broadcasts_input)

    if dtype.is_complex:
        shapes = [(S, S, S), (S, M, S), (S, S, M)]
        args = tuple(make_arg(s) for s in shapes)
        yield SampleInput(
            args[0].transpose_(-1, 1),
            args[1].transpose(-1, 1).conj().requires_grad_(requires_grad),
            args[2].transpose(-1, 1).conj().requires_grad_(requires_grad),
            beta=beta * (1 + 2j),
            alpha=alpha * (2 + 3j),
        )

# TODO: add reduction kwargs
def sample_inputs_multilabel_soft_margin_loss(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for multilabel_soft_margin_loss, with and without a weight."""
    _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    shapes = (
        (S,),
        (S, S),
    )

    for shape in shapes:
        # Produce one with weight and one without.
+ yield SampleInput(_make_tensor(shape), args=(_make_tensor(shape, requires_grad=False),), kwargs={}) + yield SampleInput(_make_tensor(shape), args=(_make_tensor(shape, requires_grad=False),), + kwargs={'weight': _make_tensor(shape, requires_grad=False)}) + +def sample_inputs_addr(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial( + make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, low=None, high=None + ) + yield SampleInput(make_arg(S, M), make_arg(S), make_arg(M)) + + yield SampleInput(make_arg(), make_arg(S), make_arg(M)).with_metadata(broadcasts_input=True) + + if dtype.is_complex: + alpha, beta = 0.1 + 0.3j, 0.4 + 0.6j + elif dtype.is_floating_point: + alpha, beta = 0.2, 0.6 + else: + alpha, beta = 2, 3 + + yield SampleInput(make_arg(S, M), make_arg(S), make_arg(M), beta=beta, alpha=alpha) + + yield SampleInput( + make_arg(), + make_arg(S), + make_arg(M), + beta=beta, + alpha=alpha, + ).with_metadata(broadcasts_input=True) + + # These samples fail gradcheck + if dtype.is_floating_point and not requires_grad: + tensor_options = dict(device=device, dtype=dtype, requires_grad=requires_grad) + yield SampleInput( + torch.tensor([[math.nan]], **tensor_options), + torch.tensor([0.0], **tensor_options), + torch.tensor([0.0], **tensor_options), + beta=0.0, + alpha=0.0, + ).with_metadata(broadcasts_input=True) + + yield SampleInput( + torch.tensor([[0.0]], **tensor_options), + torch.tensor([math.nan], **tensor_options), + torch.tensor([math.nan], **tensor_options), + beta=0.0, + alpha=0.0, + ).with_metadata(broadcasts_input=True) + +def sample_inputs_zero_(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + cases = ((), (S, S, S), (S,)) + + for shape in cases: + yield SampleInput(make_arg(shape)) + +def sample_inputs_multi_margin_loss(op_info, device, dtype, requires_grad, **kwargs): + _make_tensor = partial(make_tensor, device=device, 
dtype=dtype, requires_grad=requires_grad) + make_target = partial(_make_tensor, dtype=torch.long, requires_grad=False) + make_weight = partial(_make_tensor, requires_grad=False) + + inputs = ( + ((), make_target([], low=0, high=1), {}), + ((S,), make_target([], low=0, high=S), {"p": 1}), + ((S,), make_target([1], low=0, high=S), {"p": 2}), + ((S, M), make_target([S], low=0, high=M), {"margin": 1.0}), + ((S, M), make_target([S], low=0, high=M), {"margin": -3.14}), + ((M, S), make_target([M], low=0, high=S), {"weight": None}), + ((M, S), make_target([M], low=0, high=S), {"weight": make_weight([S], low=-10., high=10.)}), + ((M, S), make_target([M], low=0, high=S), {"reduction": "none"}), + ((M, S), make_target([M], low=0, high=S), {"reduction": "mean"}), + ((M, S), make_target([M], low=0, high=S), {"reduction": "sum"}), + ) + + for input_shape, target, kwargs in inputs: + yield SampleInput(_make_tensor(input_shape), args=(target,), kwargs=kwargs) + + +def reference_inputs_multi_margin_loss(op_info, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_multi_margin_loss(op_info, device, dtype, requires_grad, **kwargs) + _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + make_target = partial(_make_tensor, dtype=torch.long, requires_grad=False) + make_weight = partial(_make_tensor, requires_grad=False) + + inputs = ( + ((), make_target([], low=0, high=1)), + ((S,), make_target([], low=0, high=S)), + ((S,), make_target([1], low=0, high=S)), + ((M, S), make_target([M], low=0, high=S)), + ) + ps = (1, 2) + margins = (0, 7, -3.14) + weights = (False, True) + reductions = (None, "none", "mean", "sum") + + for (input_shape, target), p, margin, weight, reduction in product(inputs, ps, margins, weights, reductions): + input = _make_tensor(input_shape) + weight_shape = [input.size(-1)] if input.ndim > 0 else [1] + weight = make_weight(weight_shape, low=-10., high=10.) 
if weight else None + kwargs = {"p": p, "margin": margin, "weight": weight} + if reduction is not None: + kwargs["reduction"] = reduction + yield SampleInput(input, args=(target,), kwargs=kwargs) + + +def error_inputs_multi_margin_loss(op, device, **kwargs): + make_input = partial(make_tensor, device=device, dtype=torch.float32) + # invalid reduction + yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5,),), kwargs={'reduction': 'abc'}), + error_type=ValueError, error_regex='abc is not a valid value for reduction') + # invalid input + yield ErrorInput(SampleInput(make_input(5, 0), args=(make_input(5,),), kwargs={}), + error_type=RuntimeError, + error_regex=r'Expected non-empty vector or matrix with optional 0-dim batch size, but got: \[5, 0\]') + yield ErrorInput(SampleInput(make_input(0,), args=(make_input(5,),), kwargs={}), + error_type=RuntimeError, + error_regex=r'Expected non-empty vector or matrix with optional 0-dim batch size, but got: \[0\]') + # invalid target + yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5, 4),), kwargs={}), + error_type=RuntimeError, error_regex=r'inconsistent target size, expected 5 but got \[5, 4\]') + # invalid target dtype + yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5,),), kwargs={}), + error_type=RuntimeError, error_regex='expected scalar type Long but found Float') + # invalid weight + yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5,),), kwargs={'weight': make_input(())}), + error_type=ValueError, error_regex='weight must be one-dimensional') + yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5,),), kwargs={'weight': make_input(5, 4)}), + error_type=ValueError, error_regex='weight must be one-dimensional') + yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5,),), kwargs={'weight': make_input(5,)}), + error_type=RuntimeError, error_regex=r'inconsistent weight size, expected 4 but got \[5\]') + # invalid p + yield 
ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5,),), kwargs={'p': 3}), + error_type=ValueError, error_regex='only p == 1 and p == 2 supported') + + +def sample_inputs_logsumexp(self, device, dtype, requires_grad, **kwargs): + inputs = ( + ((), (0,), True), + ((S, S), (1,), True), + ((S, S), (1,), False), + ((S, S), (-2,), False), + ((S, S), (0, 1), False), + ) + # Test large inputs to check numerical stability + lows = (None, 1e3, 1e6) if dtype in (torch.float32, torch.float64, torch.complex64, torch.complex128) else (None,) + for low in lows: + high = low * 2 if low is not None else None + for shape, dim, keepdim in inputs: + t = make_tensor(shape, dtype=dtype, device=device, + low=low, high=high, + requires_grad=requires_grad) + yield SampleInput(t, dim, keepdim) + +def reference_inputs_logsumexp(op, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_logsumexp(op, device, dtype, requires_grad, **kwargs) + + # https://github.com/pytorch/pytorch/issues/91843 + t = torch.tensor([20, 30, 100], dtype=dtype, device=device, requires_grad=requires_grad) + yield SampleInput(t, 0, False) + + t = torch.tensor((), dtype=dtype, device=device, requires_grad=requires_grad) + yield SampleInput(t, 0, False) + + # tests masking + # https://github.com/pytorch/pytorch/pull/91860#pullrequestreview-1241344073 + t = torch.tensor(float("inf")) + yield SampleInput(t, 0, True) + +def sample_inputs_like_fns(self, device, dtype, requires_grad, **kwargs): + inputs = [ + ((), {}), + ((S, S), {}), + ((0, S, 0), {}), + ((S,), {'dtype': dtype, 'device': device}), + # Hard-code some dtypes/devices. 
We want to test cases where the + # (dtype, device) is different from the input's (dtype, device) + ((S,), {'dtype': torch.double}), + ((S,), {'device': 'cpu'}), + ((S,), {'dtype': torch.double, 'device': 'cpu'}), + ] + if torch.cuda.is_available(): + inputs.append(((S,), {'device': 'cuda'})) + + for shape, kwargs in inputs: + t = make_tensor(shape, dtype=dtype, device=device, + low=None, high=None, + requires_grad=requires_grad) + yield SampleInput(t, **kwargs) + +def reference_inputs_like_fns(op, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_like_fns(op, device, dtype, requires_grad, **kwargs) + + # shape + cases = ( + (), (0,), (1, 0), (1, 1, 4, 5), (5, 3, 0, 1), (1, 4, 3, 1, 1) + ) + + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + for shape in cases: + yield SampleInput(make_arg(shape)) + yield SampleInput(make_arg(shape).transpose(0, -1)) + yield SampleInput(make_arg(shape, noncontiguous=True)) + yield SampleInput(make_arg(shape, noncontiguous=True).transpose(0, -1)) + +def sample_inputs_multilabel_margin_loss(op_info, device, dtype, requires_grad, **kwargs): + _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + make_target = partial(_make_tensor, dtype=torch.long, requires_grad=False) + + inputs = ( + ([], make_target([], low=0, high=1), {}), + ([S], make_target([S], low=0, high=S), {}), + ([M, S], make_target([M, S], low=0, high=S), {}), + ([M, S], make_target([M, S], low=0, high=S), {"reduction": "none"}), + ([M, S], make_target([M, S], low=0, high=S), {"reduction": "mean"}), + ([M, S], make_target([M, S], low=0, high=S), {"reduction": "sum"}), + ) + + for shape, target, kwargs in inputs: + yield SampleInput(_make_tensor(shape), args=(target,), kwargs=kwargs) + + +def reference_inputs_multilabel_margin_loss(op_info, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_multilabel_margin_loss(op_info, device, dtype, requires_grad, 
**kwargs) + _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + make_target = partial(_make_tensor, dtype=torch.long, requires_grad=False) + make_target_tensor = partial(torch.tensor, device=device, dtype=torch.long, requires_grad=False) + + inputs = ( + # random tests including -1 target labels + ([], make_target([], low=-1, high=1)), + ([S], make_target([S], low=-1, high=S)), + ([M, S], make_target([M, S], low=-1, high=S)), + # repeated target labels and -1 (labels after the first -1 are ignored) + ([], make_target_tensor(-1)), + ([7], make_target_tensor([2, 0, 6, -1, 4, -1, 6])), + ([4, 5], make_target_tensor([[4, -1, 0, -1, 2], [0, 0, 4, 1, 4], [-1, 3, -1, 1, 0], [4, 3, 2, 1, 0]])), + ) + reductions = (None, "none", "mean", "sum") + + for (shape, target), reduction in product(inputs, reductions): + kwargs = {} + if reduction is not None: + kwargs["reduction"] = reduction + yield SampleInput(_make_tensor(shape), args=(target,), kwargs=kwargs) + + +def error_inputs_multilabel_margin_loss(op, device, **kwargs): + make_input = partial(make_tensor, device=device, dtype=torch.float32) + # invalid reduction + yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5, 4),), kwargs={'reduction': 'abc'}), + error_type=ValueError, error_regex='abc is not a valid value for reduction') + # invalid input + yield ErrorInput(SampleInput(make_input(5, 0), args=(make_input(5, 4),), kwargs={}), + error_type=RuntimeError, + error_regex=r'Expected non-empty vector or matrix with optional 0-dim batch size, but got: \[5, 0\]') + yield ErrorInput(SampleInput(make_input(0,), args=(make_input(0,),), kwargs={}), + error_type=RuntimeError, + error_regex=r'Expected non-empty vector or matrix with optional 0-dim batch size, but got: \[0\]') + # invalid target + yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(4,),), kwargs={}), + error_type=RuntimeError, + error_regex=r'inconsistent target size: \[4\] for input of size: \[5, 
4\]') + yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input((),),), kwargs={}), + error_type=RuntimeError, + error_regex=r'inconsistent target size: \[\] for input of size: \[5, 4\]') + + +def get_independent_tensor(tensor): + return tensor.clone().requires_grad_(tensor.requires_grad) + +def sample_inputs_randint(self, device, dtype, requires_grad, **kwargs): + low = 2 + high = 10 + + for sample in sample_inputs_like_fns(self, device, dtype, requires_grad, **kwargs): + sample.kwargs.setdefault('device', device) + # With high + yield SampleInput(high, sample.input.shape, *sample.args, **sample.kwargs) + # With low and high + yield SampleInput(low, high, sample.input.shape, *sample.args, **sample.kwargs) + +def sample_inputs_randint_like(self, device, dtype, requires_grad, **kwargs): + low = 2 + high = 10 + + for sample in sample_inputs_like_fns(self, device, dtype, requires_grad, **kwargs): + # With high + yield SampleInput( + sample.input, + high, + *sample.args, + **sample.kwargs) + # With low and high + yield SampleInput( + get_independent_tensor(sample.input), + low, + high, + *sample.args, + **sample.kwargs) + +def sample_inputs_margin_ranking_loss(op_info, device, dtype, requires_grad, **kwargs): + _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + shapes = ( + (), + (S,), + (S, S), + (S, S, S), + ) + + margins = (0., 1.) 
+ reductions = ('sum', 'mean', 'none') + + for shape in shapes: + for margin, reduction in product(margins, reductions): + kwargs = {'margin': margin, 'reduction': reduction} + yield SampleInput(_make_tensor(shape), + args=(_make_tensor(shape, requires_grad=False), + _make_tensor(shape, requires_grad=False)), + kwargs=kwargs) + +def reference_inputs_margin_ranking_loss(op, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_margin_ranking_loss(op, device, dtype, requires_grad, **kwargs) + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + for reduction in ('sum', 'mean', 'none'): + if dtype.is_floating_point: # only supports ints and floats + # NaN propagation + inp1 = make_input((10, )) + inp1[2] = float('nan') + inp2 = make_input((10, )) + inp2[4] = float('nan') + target = make_input((10, )) + inp2[9] = float('nan') + yield SampleInput(inp1, args=(inp2, target), kwargs={'reduction': reduction}) + + # Inf handling + inp1 = make_input((10, )) + inp2[1] = float('inf') + inp2 = make_input((10, )) + inp2[4] = float('inf') + target = make_input((10, )) + inp2[7] = float('inf') + yield SampleInput(inp1, args=(inp2, target), kwargs={'reduction': reduction}) + + # Broadcasting + inp1 = make_input((5, 2)) + inp2 = make_input((5, 1)) + target = make_input((1, 2)) + yield SampleInput(inp1, args=(inp2, target), kwargs={'reduction': reduction}) + +def error_inputs_margin_ranking_loss(op, device, **kwargs): + make_input = partial(make_tensor, device=device, dtype=torch.float32) + # invalid reduction value. 
+ yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5, 4), make_input(5, 4),), kwargs={'reduction': 'abc'}), + error_type=ValueError, error_regex='is not a valid value') + # invalid input shapes + yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5, 4), make_input(5,),)), + error_regex='margin_ranking_loss : All input tensors should') + +def sample_inputs_new_fns(self, device, dtype, requires_grad, *, is_strided=False, **kwargs): + # input_shape, output_shape, strides, kwargs + # lengths of output_shape and strides must be equal + inputs = [ + ((), (), (), {}), + ((S, S), (2, 0), (3, 4), {}), + ((0, S, 0), (3, 2, 2), (1, 2, 3), {}), + ((S,), (2, 3), (7, 8), {'dtype': dtype, 'device': device}), + # Hard-code some dtypes/devices. We want to test cases where the + # (dtype, device) is different from the input's (dtype, device) + ((S,), (10,), (S,), {'dtype': torch.double}), + ((S,), (1, 1, 12), (S, L, M), {'device': 'cpu'}), + ((S,), (2, 2, 2), (L, M, S), {'dtype': torch.double, 'device': 'cpu'}), + ] + if torch.cuda.is_available(): + inputs.append(((S,), (7, 2), (3, 4), {'device': 'cuda'})) + + for input_shape, output_shape, strides, kwargs in inputs: + t = make_tensor(input_shape, dtype=dtype, device=device, + low=None, high=None, + requires_grad=requires_grad) + if is_strided: + yield SampleInput(t, output_shape, strides, **kwargs) + else: + yield SampleInput(t, output_shape, **kwargs) + +def sample_inputs_empty_strided(op, device, dtype, requires_grad=False, **kwargs): + + inputs = [ + ((), (), {'dtype': dtype, 'device': device}), + ((S,), (4,), {'dtype': dtype, 'device': device}), + ((S, S), (2, 1), {'dtype': dtype, 'device': device}), + ((S, S, S), (2, 0, 1), {'dtype': dtype, 'device': device}), + ] + + for shape, strides, kwargs in inputs: + yield SampleInput(shape, strides, requires_grad=requires_grad, **kwargs) + +def sample_inputs_empty(op, device, dtype, requires_grad, **kwargs): + # shape + cases = ( + (), (0,), (1,), (1, 3, 5), 
(5, 3, 1), (1, 0, 5, 1), + ) + + for case in cases: + yield SampleInput(case, device=device, dtype=dtype, requires_grad=requires_grad) + +def sample_inputs_empty_permuted(op, device, dtype, requires_grad, **kwargs): + # shape + cases = ( + (), (0,), (1,), (1, 3, 5), (5, 3, 1), (1, 0, 5, 1), + ) + + for case in cases: + for layout in itertools.permutations(range(len(case))): + yield SampleInput(case, layout, device=device, dtype=dtype, requires_grad=requires_grad) + +def error_inputs_empty_permuted(op_info, device, **kwargs): + yield ErrorInput( + SampleInput((2,), args=((0, 1),)), + error_type=RuntimeError, + error_regex="Number of dimensions in size does not match the length of the physical_layout" + ) + yield ErrorInput( + SampleInput((2,), args=((3,),)), + error_type=RuntimeError, + error_regex="Dimension out of range" + ) + yield ErrorInput( + SampleInput((2, 3), args=((0, 0),)), + error_type=RuntimeError, + error_regex="Duplicate dim not allowed" + ) + +def sample_inputs_scalar_tensor(op, device, dtype, requires_grad, **kwargs): + # Not including a scalar tensor in vals because meta tests start failing due to + # lack of meta support for _local_scalar_dense + # torch.tensor(2, device=device) + vals = (-5, 0, 1) + + for item in vals: + yield SampleInput(item, device=device, dtype=dtype, requires_grad=requires_grad) + +def sample_inputs_eye(op, device, dtype, requires_grad, **kwargs): + # only ints >= 0 are allowed for both arguments, unless m is omitted + sizes = (None, 0, 1, 2, 3, 4, 7, L, M, S) + + for n, m in product(sizes, sizes): + if n is None: + continue + + # TODO: no layout + _kwargs = {'device': device, 'dtype': dtype, 'requires_grad': requires_grad} + if m is None: + yield SampleInput(n, args=(), kwargs=_kwargs) + else: + yield SampleInput(n, args=(m,), kwargs=_kwargs) + +def error_inputs_eye(op_info, device, **kwargs): + # TODO: no layout + _kwargs = {'device': device, 'dtype': torch.float32} + + yield ErrorInput( + SampleInput(-1, args=(), 
kwargs=_kwargs), + error_regex="n must be greater or equal to 0, got -1" + ) + + yield ErrorInput( + SampleInput(-7, args=(42,), kwargs=_kwargs), + error_regex="n must be greater or equal to 0, got -7" + ) + + yield ErrorInput( + SampleInput(0, args=(-3,), kwargs=_kwargs), + error_regex="m must be greater or equal to 0, got -3" + ) + + +def sample_inputs_new_full(self, device, dtype, requires_grad, **kwargs): + def get_val(dtype): + return make_tensor([], dtype=dtype, device="cpu").item() + + for sample in sample_inputs_new_fns(self, device, dtype, requires_grad, **kwargs): + # The scalar we are passing to new_full must be the same dtype + # as the one of the resulting tensor + use_dtype = sample.kwargs['dtype'] if 'dtype' in sample.kwargs else dtype + yield SampleInput( + sample.input, *sample.args, get_val(use_dtype), **sample.kwargs) + +def sample_inputs_full_like(self, device, dtype, requires_grad, **kwargs): + def get_val(dtype): + return make_tensor([], dtype=dtype, device="cpu").item() + + inputs = [ + ((), get_val(dtype), {}), + ((S, S), get_val(dtype), {}), + ((0, S, 0), get_val(dtype), {}), + ((S,), get_val(dtype), {'dtype': dtype, 'device': device}), + # Hard-code some dtypes/devices. 
We want to test cases where the + # (dtype, device) is different from the input's (dtype, device) + ((S,), get_val(torch.double), {'dtype': torch.double}), + ((S,), get_val(dtype), {'device': 'cpu'}), + ((S,), get_val(torch.double), {'dtype': torch.double, 'device': 'cpu'}), + ] + if torch.cuda.is_available(): + inputs.append(((S,), get_val(dtype), {'device': 'cuda'})) + + for shape, fill_value, kwargs in inputs: + t = make_tensor(shape, dtype=dtype, device=device, + low=None, high=None, + requires_grad=requires_grad) + yield SampleInput(t, fill_value, **kwargs) + +def sample_inputs_multinomial(self, device, dtype, requires_grad, **kwargs): + cases = [ + ([3], 3, {}), + ([10], 3, {}), + ([3, 10], 3, {}), + ([3], 3, dict(replacement=False)), + ([3], 3, dict(replacement=True)), + ([3, 4], 4, dict(replacement=True)), + ([3, 4], 4, dict(replacement=False)), + ] + + for shape, num_samples, kwargs in cases: + t = make_tensor(shape, dtype=dtype, device=device, + low=0, high=None, + requires_grad=requires_grad) + yield SampleInput(t, num_samples, **kwargs) + +def sample_inputs_normal_common(self, device, dtype, requires_grad, cases, **kwargs): + def get_value_or_make_tensor(value_or_shape): + if isinstance(value_or_shape, list): + return make_tensor(value_or_shape, dtype=dtype, device=device, + low=0, high=None, + requires_grad=requires_grad) + return value_or_shape + + for value_or_mean_shape, value_or_std_shape, kwargs in cases: + mean = get_value_or_make_tensor(value_or_mean_shape) + std = get_value_or_make_tensor(value_or_std_shape) + yield SampleInput(mean, std, **kwargs) + +def sample_inputs_normal_tensor_first(self, device, dtype, requires_grad, **kwargs): + # value_or_size, value_or_size, kwargs + cases = [ + ([], [], {}), + ([3], [3], {}), + ([3, 4, 2], [3, 4, 2], {}), + ([2, 3], 1.1, {}), + ([1, 2, 3], [5, 2, 3], {}), # broadcasting + ] + + return sample_inputs_normal_common(self, device, dtype, requires_grad, cases, **kwargs) + +def 
sample_inputs_normal_tensor_second(self, device, dtype, requires_grad, **kwargs): + yield SampleInput(1.6, 0.3, [2, 3], dtype=dtype, device=device) + yield SampleInput(1.6, 0.3, [2, 2, 2], dtype=dtype, layout=torch.strided, device=device) + yield SampleInput(2.7, make_tensor([4, 3], dtype=dtype, device=device, low=0, high=None, requires_grad=requires_grad)) + +def sample_inputs_bernoulli(self, device, dtype, requires_grad, **kwargs): + shapes = [ + [3], + [], + [0, 3], + [2, 3, 4], + ] + + for shape in shapes: + t = make_tensor(shape, dtype=dtype, device=device, + low=0, high=1, + requires_grad=requires_grad) + yield SampleInput(t) + +def error_inputs_bernoulli(op_info, device, **kwargs): + # more than one element of the written-to tensor refers to a single memory location + x = torch.rand((1,), device=device).expand((6,)) + err_msg = 'unsupported operation' + yield ErrorInput(SampleInput(torch.rand_like(x), kwargs={'out': x}), + error_regex=err_msg) + +def sample_inputs_logcumsumexp(self, device, dtype, requires_grad, **kwargs): + inputs = ( + ((S, S, S), 0), + ((S, S, S), 1), + ((), 0), + ) + + for large_number in (True, False): + for shape, dim in inputs: + t = make_tensor(shape, dtype=dtype, device=device, + low=None, high=None, + requires_grad=requires_grad) + + if large_number and t.dim() > 0: + t[0] = 10000 + yield SampleInput(t, dim) + +def sample_inputs_trace(self, device, dtype, requires_grad, **kwargs): + yield SampleInput( + make_tensor((S, S), dtype=dtype, device=device, + low=None, high=None, + requires_grad=requires_grad)) + + +def error_inputs_trace(op, device): + yield ErrorInput(SampleInput(make_tensor((3, 4, 5), dtype=torch.float32, device=device)), error_regex="expected a matrix") + + +def sample_inputs_renorm(self, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + cases = (((S, S, S), (2, 1, 0.5)), + ((S, S, S), (2, -1, 0.5)), + ((S, S, S), (1, 2, 3)), + ((S, S, 
S), (float('inf'), 2, 0.5)), + ) + + for shape, args in cases: + yield SampleInput(make_arg(shape), args=args) + + +def sample_inputs_transpose_swapdims(self, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + cases = (((1, 2, 3), (-1, -2)), + ((1, 2, 3), (-1, 2)), + ((1, 2, 3), (1, -2)), + ((1, 2, 3), (1, 2)), + ((), (0, 0)), + ((1, ), (0, 0)), + ((M, M), (0, 1)), + ((S, S, S), (2, 0)), ) + + for shape, args in cases: + yield SampleInput(make_arg(shape), args=args) + +def _numpy_ref_transpose(a, dim0, dim1): + if a.ndim <= 1: + return a + + return np.swapaxes(a, dim0, dim1) + +def sample_inputs_adjoint(self, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + shapes = ((1, 2, 3), (M, M), (S, S, S), (S, M, S), (M, S, M, S)) + return (SampleInput(make_arg(shape)) for shape in shapes) + +def sample_inputs_T(self, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + shapes = ((M, M), (M, L)) + return (SampleInput(make_arg(shape)) for shape in shapes) + +def error_inputs_T(self, device, has_ndims_error=False): + make_arg = partial(make_tensor, device=device, dtype=torch.float32) + + # Deprecated behavior in regular PyTorch, but throws an error in primTorch: + # https://github.com/pytorch/pytorch/issues/86968 + if has_ndims_error: + # ndims == 1 + yield ErrorInput(SampleInput(make_arg(M)), + error_regex=(r'The use of `x\.T` on tensors of dimension other than 0 or 2 ' + r'to reverse their shape is not supported\.')) + + # ndims > 2 + yield ErrorInput(SampleInput(make_arg(M, S, L)), + error_regex=(r'The use of `x\.T` on tensors of dimension other than 0 or 2 ' + r'to reverse their shape is not supported\.')) + + +def sample_inputs_singular_matrix_factors(op_info, device, dtype, requires_grad=False): + """ + This function 
produces two tensors of shape (*, m, k) and (*, n, k) with k <= min(m, n). + Their matrix product could be used to generate tensor of shape (*, m, n) of rank k. + """ + + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + batches = [(), (2,)] + size = [3, 4] + for batch, m, n in product(batches, size, size): + k = 2 + a = make_arg((*batch, m, k)) + b = make_arg((*batch, n, k)) + yield a, b + + +def sample_inputs_svd_lowrank(op_info, device, dtype, requires_grad=False, **kwargs): + # Function that's well defined on the outputs for complex inputs + def fn(usv): + U, S, V = usv + return U @ V.mH, S + + for (a, b) in sample_inputs_singular_matrix_factors(op_info, device, dtype, requires_grad): + *batch, m, k = a.shape + n = b.shape[-2] + + # NOTE: since svd_lowrank relies on non rank-revealing SVD, + # it inherits the problem of unstable behavior with repeated + # singular values including zeros. + # Since we want to avoid (repeated) zeros as singular values, + # we can only use k for q. + # This issues could be resolved with using a rank-revealing SVD + # which does not include "zero" singular values. 
+ yield SampleInput(a, b, q=k, M=None).with_metadata(output_process_fn_grad=fn) + + for (a, b) in sample_inputs_singular_matrix_factors(op_info, device, dtype, requires_grad): + *batch, m, k = a.shape + n = b.shape[-2] + M = make_tensor((*batch, m, n), dtype=dtype, device=device, requires_grad=requires_grad) + yield SampleInput(a, b, q=k, M=M).with_metadata(output_process_fn_grad=fn) + +def chunk_iter(iterable, size): + it = iter(iterable) + while True: + chunk = tuple(islice(it, size)) + if not chunk: + break + yield chunk + +def sample_inputs_pca_lowrank(op_info, device, dtype, requires_grad=False, **kwargs): + # we reuse samples from svd_lowrank which come in group of two with + # kwarg['M'] = None and with kwarg['M'] = + samples = sample_inputs_svd_lowrank(op_info, device, dtype, requires_grad, **kwargs) + for s1, s2 in chunk_iter(samples, 2): + del s1.kwargs['M'] + del s2.kwargs['M'] + s1.kwargs['center'] = False + s2.kwargs['center'] = True + yield s1 + yield s2 + +def np_sinc_with_fp16_as_fp32(x): + # Wraps numpy's sinc function so that fp16 values are promoted to fp32 + # before sinc is invoked. Context: numpy's sinc returns NaN when evaluated + # at 0 for fp16. 
+ if x.dtype == np.float16: + return np.sinc(x.astype(np.float32)) + else: + return np.sinc(x) + +def sample_inputs_broadcast_to(op_info, device, dtype, requires_grad, **kwargs): + test_cases = ( + ((S, 1, 1), (S, S, S)), + ((S, 1, S), (S, S, S)), + ((S, 1), (S, S, S)), + ((1,), (S, S, S)), + ((1, S), (1, 1, S)), + ((), ()), + ((), (1, 3, 2)), + ) + + return ( + SampleInput( + make_tensor(size, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), + shape, + ) for size, shape in test_cases) + +def sample_inputs_broadcast_tensors(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + test_cases: Tuple[tuple] = (((3,), (1, 2, 1), (1, 1), (5, 1, 1),),) + + for shape, *other_shapes in test_cases: + yield SampleInput(make_arg(shape), args=tuple(make_arg(s) for s in other_shapes)) + +def reference_inputs_broadcast_tensors(op, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_broadcast_tensors(op, device, dtype, requires_grad, **kwargs) + + m = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + n = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad, noncontiguous=True) + + cases = ( + ((), (1, 1), (1, 1, 7, 1), (3, 1, 1)), + ((3, 5, 6), (1, 3, 5, 6), (1, 1, 1, 1, 6), (8, 3, 5, 6)) + ) + + for a, b, c, d in cases: + yield SampleInput(m(a), args=(m(b), m(c), m(d))) + yield SampleInput(n(a), args=(n(b), n(c), n(d))) + +def sample_inputs_block_diag(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + test_cases: Tuple[tuple] = ( + ((1, S), (2, S), (3, S),), + ((S, 1), (S, 2), (S, 3),), + ((1,), (2,), (3,),), + ((2, S), (S,)) + ) + + for shape, *other_shapes in test_cases: + yield SampleInput(make_arg(shape), args=tuple(make_arg(s) for s in other_shapes)) + # We also want to test mixed complex-non-complex 
inputs to block_diag + if dtype == torch.complex32 or dtype == torch.complex64: + non_complex_dtype = torch.float32 if dtype == torch.complex32 else torch.float64 + make_arg_non_complex = partial(make_tensor, dtype=non_complex_dtype, device=device, requires_grad=requires_grad) + yield SampleInput(make_arg_non_complex(shape), args=tuple(make_arg(s) for s in other_shapes)) + +def sample_inputs_cdist(op_info, device, dtype, requires_grad, **kwargs): + small_S = 2 + test_cases = ( + ((S, S, 2), (S, S + 1, 2)), + ((S, S), (S, S)), + ((S, S, S), (S, S, S)), + ((3, 5), (3, 5)), + ((2, 3, 5), (2, 3, 5)), + ((1, 2, 3), (1, 2, 3)), + ((1, 1), (S, 1)), + ((0, 5), (4, 5)), + ((4, 5), (0, 5)), + ((0, 4, 5), (3, 5)), + ((4, 5), (0, 3, 5)), + ((0, 4, 5), (1, 3, 5)), + ((1, 4, 5), (0, 3, 5)), + # Using S here would make this one test take 9s + ((small_S, small_S, small_S + 1, 2), (small_S, small_S, small_S + 2, 2)), + ((small_S, 1, 1, small_S), (1, small_S, small_S)), + ((1, 1, small_S), (small_S, 1, small_S, small_S)), + ) + + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + for cm in ['use_mm_for_euclid_dist', 'donot_use_mm_for_euclid_dist']: + # FIXME add an override for JIT and revert 0. 
back to 0 + # since it's accepted by eager + for p in [0., 1., 2., 3., 0.5, 1.5, 2.5, float("inf")]: + for t1_size, t2_size in test_cases: + # The args should never be non-contiguous as this is not supported in the backward + yield SampleInput(make_arg(t1_size), make_arg(t2_size), p, cm) + +def _fill_np(a, value): + a = a.copy() + a.fill(value) + return a + +def _fill_sample_kwargs(device, dtype, input): + if dtype is torch.bool: + value = True + else: + value = 3 + + return ({'value': value}, {'value': value}) + +def sample_inputs_comparison_ops(op, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_elementwise_binary(op, device, dtype, requires_grad, **kwargs) + + # Adds a sample input where both tensors have the same values + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + lhs = make_arg((S, S)) + yield SampleInput(lhs, args=(lhs.clone(),)) + +def sample_inputs_stack(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # shape x number of tensors + cases = ( + ((3, 4), 1), + ((1, 2, 1, 4), 3), + ((0, 1, 0), 2),) + + for shape, num_tensors in cases: + tensors = [] + for _ in range(num_tensors): + tensors.append(make_arg(shape)) + for dim in range(-1, len(shape) - 1): + yield SampleInput(tensors, args=(dim,)) + + +def sample_inputs_chunk_cat(op_info, device, dtype, requires_grad, **kwargs): + # 1. If input tensors have different ndims, dim should be non-negative and be less than the ndims of every input tensors. + # If all input tensors have the same ndims, we support both negative and non-negative dim. + # 2. For wrapped_dim, all tensors should have the same size for 0,...,wrapped_dim-1 dimensions. + # No requirements for (wrapped_dim, ...)-th dimension. + # 3. Expect positive num_chunks + # 4. 
Expect non-empty input tensor list and each input tensor should have at least 1 element + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + same_ndim_cases = ( + ( + [ + torch.Size([1, 2, 3]), + torch.Size([1, 2, 3]), + ], -1, 5 + ), + ( + [ + torch.Size([1, 2, 129]), + torch.Size([1, 2, 297]), + ], -1, 5 + ), + ( + [ + torch.Size([1, 2, 3]), + torch.Size([1, 2, 3]), + ], 1, 5 + ), + ( + [ + torch.Size([3, 3, 2, 1]), + torch.Size([1, 4, 2, 2]), + torch.Size([2, 1, 3, 3]), + ], 0, 2 + ), + ) + for sizes, dim, num_chunks in same_ndim_cases: + tensors = [] + for size in sizes: + tensors.append(make_arg(size)) + yield SampleInput(tensors, args=(dim, num_chunks)) + + different_ndim_case = [ + torch.Size([2, 3, 3]), + torch.Size([2, 3, 1, 2]), + torch.Size([2, 3]), + torch.Size([2, 3, 2]), + torch.Size([2, 3, 271]), + ] + max_dim, num_chunks = 2, 3 + for dim in range(max_dim): + tensors = [] + for size in different_ndim_case: + tensors.append(make_arg(size)) + yield SampleInput(tensors, args=(dim, num_chunks)) + + +def error_inputs_chunk_cat(op_info, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float32) + + # input tensors have different ndims but dim is negative + sizes, dim, num_chunks = [torch.Size([2, 3]), torch.Size([4,])], -1, 3 + tensors = [make_arg(size) for size in sizes] + yield ErrorInput( + SampleInput(tensors, args=(dim, num_chunks)), + error_regex='_chunk_cat expects non-negative dim when input tensors have different ndims', + ) + + # input tensors have different ndims but dim >= ndim of some input tensors + sizes, dim, num_chunks = [torch.Size([2, 3]), torch.Size([4,])], 1, 3 + tensors = [make_arg(size) for size in sizes] + yield ErrorInput( + SampleInput(tensors, args=(dim, num_chunks)), + error_regex='_chunk_cat expects dim < ndim for all input tensors', + ) + + # some tensors have different sizes for 0, ..., dim-1 dimensions. 
+ sizes, dim, num_chunks = [torch.Size([2, 3, 4]), torch.Size([4, 3])], 1, 3 + tensors = [make_arg(size) for size in sizes] + yield ErrorInput( + SampleInput(tensors, args=(dim, num_chunks)), + error_regex='_chunk_cat expects same sizes of 0,...,dim-1 dimensions for all tensors', + ) + + # negative num_chunks + sizes, dim, num_chunks = [torch.Size([2,]), torch.Size([3,])], 0, -1 + tensors = [make_arg(size) for size in sizes] + yield ErrorInput( + SampleInput(tensors, args=(dim, num_chunks)), + error_regex='_chunk_cat expects positive num_chunks', + ) + + # zero as num_chunks + sizes, dim, num_chunks = [torch.Size([2,]), torch.Size([3,])], 0, 0 + tensors = [make_arg(size) for size in sizes] + yield ErrorInput( + SampleInput(tensors, args=(dim, num_chunks)), + error_regex='_chunk_cat expects positive num_chunks', + ) + + # empty input tensor list + dim, num_chunks = 0, 1 + yield ErrorInput( + SampleInput([], args=(dim, num_chunks)), + error_regex='_chunk_cat expects a non-empty input tensor list', + ) + + # empty input tensor with 0 elements + sizes, dim, num_chunks = [torch.Size([0,]), torch.Size([3,])], 0, 1 + tensors = [make_arg(size) for size in sizes] + yield ErrorInput( + SampleInput(tensors, args=(dim, num_chunks)), + error_regex='_chunk_cat expects non-empty tensor', + ) + + +def sample_inputs_cat_concat(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + cases: Tuple[tuple, tuple, dict] = ( # type: ignore[assignment] + ((S, S), (S, S), {'dim': -1}), + ((S, S), (S, S), {'dim': 1}), + ((M, S), (S, S), {'dim': 0}), # different shapes + ((1, 2, 3), (1, 2, 3), {'dim': -2}), + ((0,), (0,), {'dim': 0}), # empty tensor + ((0,), (S, S), {'dim': 1}), # empty tensor with unempty and dim=1 (special case for legacy_cat_wrap_dim) + ((0, S), (S, S), {'dim': 0}), + ((1,), (1,), {}) # dim not passed, fallback to default + ) + + for input_shape1, input_shape2, kwargs in cases: + yield 
SampleInput([make_arg(input_shape1), make_arg(input_shape2)], kwargs=kwargs) + + # from coat_lite_mini + yield SampleInput([make_arg((2, 2, 2, 2), memory_format=torch.channels_last)], args=(1,),) + +def error_inputs_cat(op_info, device, **kwargs): + + make_arg = partial(make_tensor, device=device, dtype=torch.float32) + + # error inputs for more than one element of the written-to tensor refer to a single memory location + yield ErrorInput(SampleInput([make_arg((S, S)), make_arg((S, S))], + kwargs={'out': make_arg((1, S)).expand((2 * S, S))}), + error_regex='unsupported operation') + + # error inputs for empty tensors + yield ErrorInput(SampleInput([], kwargs={'dim': 1}), + error_regex='non-empty list of Tensors') + + # error inputs for different sizes + yield ErrorInput(SampleInput([make_arg((S, S, L, L)), make_arg((S, 0, L - 1, L))], kwargs={'dim': 1}), + error_regex='Sizes of tensors must match except in dimension') + yield ErrorInput(SampleInput([make_arg((S, 0, L - 1, L)), make_arg((S, S, L, L))], kwargs={'dim': 1}), + error_regex='Sizes of tensors must match except in dimension') + + # error inputs for different dimensions + yield ErrorInput(SampleInput([make_arg((S - 1, 0)), make_arg((S, 0, L - 1, L))], kwargs={'dim': 1}), + error_regex='Tensors must have same number of dimensions') + yield ErrorInput(SampleInput([make_arg((S, 0, L - 1, L)), make_arg((S - 1, 0))], kwargs={'dim': 1}), + error_regex='Tensors must have same number of dimensions') + + # error inputs for same memory locations + x = torch.zeros((0), device=device) + y = torch.randn((4, 6), device=device) + + err_msg = "the written-to tensor refer to a single memory location" + + yield ErrorInput(SampleInput((x, y), kwargs={'dim': 0, 'out': x}), + error_regex=err_msg) + yield ErrorInput(SampleInput((x, y), kwargs={'dim': 0, 'out': y}), + error_regex=err_msg) + + z = torch.zeros((4, 6), device=device) + yield ErrorInput(SampleInput((y, z), kwargs={'out': z[:2, :]}), + error_regex=err_msg) + + # error 
inputs for different devices + if torch.device(device).type == 'cuda': + x_cuda = make_tensor((3, 3), device=device, dtype=torch.float32) + y_cpu = make_tensor((3, 3), device='cpu', dtype=torch.float32) + yield ErrorInput(SampleInput((x_cuda, y_cpu)), + error_regex='Expected all tensors to be on the same device') + + # error inputs for different input sizes for more than 2 tensors + yield ErrorInput(SampleInput([make_arg((L, 1)), make_arg((L, 1, 1)), make_arg((L, 1, 1))]), + error_regex='Tensors must have same number of dimensions') + + yield ErrorInput(SampleInput([make_arg((S, 1, M)), make_arg((S, 1, 1)), make_arg((S, M, 1))], + kwargs={'dim': 1}), + error_regex='Sizes of tensors must match') + + # error inputs for None input + yield ErrorInput(SampleInput((make_arg((S, 1, 1)), None)), error_type=TypeError, + error_regex='got None') + + # error inputs for zero-dimensional tensors + yield ErrorInput(SampleInput([make_arg(()), make_arg(())]), + error_regex='zero-dimensional.*cannot be concatenated') + + # error inputs for different dtype of out tensors + d = make_tensor((2, 3), device=device, dtype=torch.double) + x = make_tensor((2, 3), device=device, dtype=torch.float32) + yield ErrorInput(SampleInput(x, kwargs={'out': d}), error_type=TypeError, + error_regex='invalid combination of arguments') + +def reference_inputs_cat(op, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_cat_concat(op, device, dtype, requires_grad, **kwargs) + + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Noncontiguous type promoting tensors + a = make_arg((3, 4, 2)) + b = make_arg((3, 2, 2), noncontiguous=True, dtype=torch.double) + c = make_arg((3, 3, 2), dtype=torch.float16).permute(1, 0, 2) + + yield SampleInput((a, b, c), kwargs={'dim': 1}) + + # Special 1D tensor with dim length of 0 case + a = make_arg((0,)) + b = make_arg((3, 2, 2)) + + yield SampleInput((a, b, a)) + yield SampleInput((a, a, a)) + +def 
_elementwise_type_promo_np(*args, type_promotion_kind): + def _maybe_torch(x): + if isinstance(x, np.ndarray): + return torch.from_numpy(x) + return x + + flattened = pytree.arg_tree_leaves(*args) + transformed = tuple(_maybe_torch(a) for a in flattened) + result_dtype, _ = prims.utils.elementwise_dtypes( + *transformed, + type_promotion_kind=type_promotion_kind) + return torch_to_numpy_dtype_dict[result_dtype] + +def _cat_np(input_seq, dim=0): + inputs = tuple(a for a in input_seq if not (a.ndim == 1 and a.size == 0)) + + if len(inputs) == 0: + np_dtype = _elementwise_type_promo_np( + input_seq, + type_promotion_kind=prims.utils.ELEMENTWISE_TYPE_PROMOTION_KIND.NO_OPMATH) + return np.empty(0, dtype=np_dtype) + + return np.concatenate(inputs, axis=dim) + +def _floor_divide_np(a, b): + dtype = _elementwise_type_promo_np( + a, + b, + type_promotion_kind=prims.utils.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT) + if isinstance(a, np.ndarray): + a = a.astype(dtype) + if isinstance(b, np.ndarray): + b = b.astype(dtype) + return np.floor_divide(a, b) + +def sample_inputs_hstack_dstack_vstack(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + tensor_shapes = ( + # First Tensor being 1-D is special + # case for hstack + ((S,), (S,), (S,)), + ((S, S), (S, S), (S, S)), + ) + for s1, s2, s3 in tensor_shapes: + tensors = (make_arg(s1,), make_arg(s2,), make_arg(s3)) + yield SampleInput(tensors) + +def error_inputs_hstack_dstack_vstack(op, device): + make_arg = partial(make_tensor, dtype=torch.int32, device=device, requires_grad=False) + tensor_shapes = ( + ((S,), (S, S, S, S), (S,)), + ) + for s1, s2, s3 in tensor_shapes: + tensors = (make_arg(s1,), make_arg(s2,), make_arg(s3)) + # Different dimension tensor + yield ErrorInput(SampleInput(tensors), error_regex="Tensors must have same number of dimensions") + + # empty tensor list + yield ErrorInput(SampleInput(()), error_regex="expects a 
non-empty TensorList") + +def sample_inputs_unbind(op_info, device, dtype, requires_grad, **kwargs): + # Note: we don't do any tests where we unbind along 0-length dims + # because in that case unbind returns and empty tuple, and that breaks + # some assumptions in some backward tests in test_ops.py + shape_dims = (((S,), 0), + ((S, S), 0), + ((S, S), 1), + ((S, S), -1), + ((S, 0, S), 0), + ((S, S, S), 1), + ) + for shape, dim in shape_dims: + yield SampleInput(make_tensor(shape, dtype=dtype, device=device, + requires_grad=requires_grad), + args=(dim,)) + +def error_inputs_unbind(op_info, device): + make_arg = partial(make_tensor, dtype=torch.int32, device=device, requires_grad=False) + yield ErrorInput(SampleInput(make_arg(()), args=(0,)), error_type=IndexError, + error_regex="Dimension specified as 0 but tensor has no dimensions") + yield ErrorInput(SampleInput(make_arg((2,)), args=(2,)), error_type=IndexError, + error_regex="Dimension out of range") + +def reference_unbind(t, dim): + """A numpy implementation of torch.unbind""" + return tuple(s.squeeze(dim) for s in np.split(t, t.shape[dim], dim)) + +def sample_inputs_gather(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, low=None, high=None) + yield SampleInput( + make_arg((M, S)), + 0, + gather_variable((S, S), 1, M, True, device=device)) + yield SampleInput( + make_arg((M, S)), + 1, + gather_variable((M, S // 2), 0, S, True, device=device)) + # Empty index tensor case, see: https://github.com/pytorch/pytorch/pull/65006 + yield SampleInput( + make_arg((S,)), + 0, + torch.tensor([], dtype=torch.uint8, device=device)) + yield SampleInput( + make_arg((S,)), + 0, + torch.tensor([[], []], dtype=torch.uint8, device=device)) + # 0D tensor case + yield SampleInput( + make_arg(()), + 0, + torch.tensor([0], dtype=torch.int64, device=device)) + yield SampleInput( + make_arg(()), + 0, + torch.tensor(0, dtype=torch.int64, 
device=device)) + +def _fill_indices(idx, dim, dim_size, elems_per_row, m, n, o): + for i in range(1 if dim == 0 else m): + for j in range(1 if dim == 1 else n): + for k in range(1 if dim == 2 else o): + ii = [i, j, k] + ii[dim] = slice(0, idx.size(dim) + 1) + idx[tuple(ii)] = torch.randperm(dim_size)[0:elems_per_row] + +def error_inputs_gather(op_info, device, **kwargs): + # src is [1, 2] + # [3, 4] + src = torch.tensor(((1, 2), (3, 4)), device=device, dtype=torch.float32) + + # idx is [0, 0] + # [1, 0] + idx = torch.tensor(((0, 0), (1, 0)), device=device, dtype=torch.long) + + # Index should be smaller than self except on dimension 1 + bad_src = make_tensor((1, 1), device=device, dtype=torch.float32) + yield ErrorInput(SampleInput(bad_src, args=(1, idx,)), + error_regex="Size does not match at dimension 0") + + # Index must have long dtype + bad_idx = idx.to(torch.int32) + yield ErrorInput(SampleInput(src, args=(1, bad_idx)), + error_regex="Expected dtype int64 for index") + + # TODO: FIXME + # out.dtype must match src.dtype + # Creates new src & idx since SampleInputs can't share tensors + src = torch.tensor(((1, 2), (3, 4)), device=device, dtype=torch.float32) + idx = torch.tensor(((0, 0), (1, 0)), device=device, dtype=torch.long) + out = torch.empty((2, 2), device=device, dtype=torch.float64) + yield ErrorInput(SampleInput(src, args=(1, idx), kwargs={'out': out}), + error_regex="Expected out tensor to have dtype") + + # src and index tensors must have the same # of dimensions + # idx too few dimensions + src = torch.tensor(((1, 2), (3, 4)), device=device, dtype=torch.float32) + idx = torch.tensor((0, 0), device=device, dtype=torch.long) + yield ErrorInput(SampleInput(src, args=(1, idx)), + error_regex="Index tensor must have the same number of dimensions") + + # src too few dimensions + src = torch.tensor((1, 2), device=device, dtype=torch.float32) + idx = torch.tensor(((0, 0), (1, 0)), device=device, dtype=torch.long) + yield ErrorInput(SampleInput(src, 
args=(0, idx)), + error_regex="Index tensor must have the same number of dimensions") + + # index out of bounds + # NOTE: this ErrorInput is guarded because bounds checking does not occur on CUDA devices + if torch.device(device).type == 'cpu': + src = torch.tensor(((1, 2), (3, 4)), device=device, dtype=torch.float32) + idx = torch.tensor(((0, 23), (1, 0)), device=device, dtype=torch.long) + yield ErrorInput(SampleInput(src, args=(1, idx,)), + error_regex="index 23 is out of bounds for dimension") + + x = torch.rand((1,), device=device).expand((3,)) + src = torch.rand((6,), device=device) + ind = torch.tensor([2, 1, 0], device=device, dtype=torch.int64) + + yield ErrorInput(SampleInput(src, args=(0, ind,), kwargs=dict(out=x)), + error_type=RuntimeError, + error_regex='unsupported operation') + + yield ErrorInput(SampleInput(src, args=(0, ind,), kwargs=dict(out=src)), + error_type=RuntimeError, + error_regex='unsupported operation') + + yield ErrorInput(SampleInput(ind.clone(), args=(0, ind[1:],), kwargs=dict(out=ind[:1])), + error_type=RuntimeError, + error_regex='unsupported operation') + +def error_inputs_take(op_info, device, **kwargs): + x = torch.rand((1,), device=device).expand((3,)) + src = torch.rand((6,), device=device) + ind = torch.tensor([2, 1, 0], device=device, dtype=torch.int64) + + yield ErrorInput(SampleInput(src, args=(ind,), kwargs=dict(out=x)), + error_type=RuntimeError, + error_regex='unsupported operation') + + yield ErrorInput(SampleInput(src, args=(ind,), kwargs=dict(out=src)), + error_type=RuntimeError, + error_regex='unsupported operation') + + yield ErrorInput(SampleInput(ind.clone(), args=(ind[1:],), kwargs=dict(out=ind[:-1])), + error_type=RuntimeError, + error_regex='unsupported operation') + +# Error inputs for scatter +def error_inputs_scatter_and_scatter_add(op_info, device, **kwargs): + # Error when self.dtype != src.dtype (and src is not a scalar) + src = make_tensor((2, 5), device=device, dtype=torch.float32) + idx = 
torch.tensor(((0, 1), (1, 2)), device=device, dtype=torch.long) + dst = torch.zeros((3, 5), device=device, dtype=torch.double) + yield ErrorInput(SampleInput(dst, args=(0, idx, src)), + error_regex="Expected self.dtype to be equal to src.dtype") + + # Index dtype must be long + src = make_tensor((2, 5), device=device, dtype=torch.float32) + idx = torch.tensor(((0, 1), (1, 2)), device=device, dtype=torch.int32) + dst = torch.zeros((3, 5), device=device, dtype=torch.float32) + yield ErrorInput(SampleInput(dst, args=(0, idx, src)), + error_regex="Expected dtype int64 for index") + + # Index and destination must have the same number of dimensions + src = make_tensor((2, 5), device=device, dtype=torch.float32) + idx = torch.tensor(((0, 1), (1, 2)), device=device, dtype=torch.long) + dst = torch.zeros((3, 5, 3), device=device, dtype=torch.float32) + yield ErrorInput(SampleInput(dst, args=(0, idx, src)), + error_regex="Index tensor must have the same number of dimensions as self tensor") + + # Index and src must have the same number of dimensions when src is not a scalar + src = make_tensor((2, 5, 2), device=device, dtype=torch.float32) + idx = torch.tensor(((34, 1), (1, 2)), device=device, dtype=torch.long) + dst = torch.zeros((3, 5), device=device, dtype=torch.float32) + yield ErrorInput(SampleInput(dst, args=(0, idx, src)), + error_regex="Index tensor must have the same number of dimensions as src tensor") + + # Index out of bounds + # NOTE: this ErrorInput is guarded because bounds checking does not occur on CUDA devices + if torch.device(device).type == 'cpu': + src = make_tensor((2, 5), device=device, dtype=torch.float32) + idx = torch.tensor(((34, 1), (1, 2)), device=device, dtype=torch.long) + dst = torch.zeros((3, 5), device=device, dtype=torch.float32) + yield ErrorInput(SampleInput(dst, args=(0, idx, src)), + error_regex="index 34 is out of bounds for dimension 0 with size 3") + +def error_inputs_renorm(op_info, device, **kwargs): + zero_d = torch.randn((), 
device=device) + yield ErrorInput(SampleInput(zero_d, args=(0.5, 0, 1.0)), error_type=RuntimeError, + error_regex="needs at least 2 dimensions, got 0 dimensions") + + +def error_inputs_ormqr(op_info, device, **kwargs): + zero_d = torch.randn((), device=device) + yield ErrorInput(SampleInput(zero_d, args=(zero_d, zero_d)), error_type=RuntimeError, + error_regex="input must have at least 2 dimensions") + + # https://github.com/pytorch/pytorch/issues/85218 + tensor_0 = torch.full((5, 0,), 1, device=device) + tensor_1 = torch.full((5,), 1, device=device) + tensor_2 = torch.full((5, 5,), 1, device=device) + bool_3 = True + bool_4 = True + yield ErrorInput(SampleInput(tensor_0, args=(tensor_1, tensor_2, bool_3, bool_4)), error_type=RuntimeError, + error_regex=r"tau.shape\[-1\] must be less than or equal to input.shape\[-1\]") + + +def error_inputs_diag(op_info, device, **kwargs): + zero_d = torch.randn((), device=device) + yield ErrorInput(SampleInput(zero_d, args=(0,)), error_type=RuntimeError, + error_regex="1D or 2D") + zero_d = torch.randn(1, 1, 1, device=device) + yield ErrorInput(SampleInput(zero_d, args=(0,)), error_type=RuntimeError, + error_regex="1D or 2D") + +def error_inputs_embedding(op_info, device, **kwargs): + indices = torch.rand(2, 2, device=device).long() + weights = [ + torch.tensor(1.0, device=device), + torch.tensor(1.0, device=device).reshape(1, 1, 1), + ] + + for weight in weights: + yield ErrorInput(SampleInput(weight, args=(indices,)), error_type=RuntimeError, + error_regex="'weight' must be 2-D") + + +def error_inputs_t(op_info, device, **kwargs): + yield ErrorInput( + SampleInput(torch.randn(2, 3, 4, 5, device=device)), + error_regex="expects a tensor with <= 2", + ) + + +def error_inputs_multinomial(op_info, device, **kwargs): + x = torch.empty(1, 2, 3, dtype=torch.double, device=device) + yield ErrorInput(SampleInput(x, args=(2,)), + error_regex="prob_dist must be 1 or 2 dim") + + x = torch.empty(1, 2, dtype=torch.long, device=device) + 
yield ErrorInput(SampleInput(x, args=(2,)), + error_regex="multinomial only supports floating-point dtypes for input") + + x = torch.empty(1, 2, dtype=torch.double, device=device) + y = torch.empty(1, 2, dtype=torch.double, device=device) + yield ErrorInput(SampleInput(x, args=(2,), kwargs=dict(out=y)), + error_regex="multinomial expects Long tensor out") + + x = torch.empty(2, dtype=torch.double, device=device) + yield ErrorInput(SampleInput(x, args=(0,)), + error_regex="cannot sample n_sample <= 0 samples") + + x = torch.empty(2, dtype=torch.double, device=device) + yield ErrorInput(SampleInput(x, args=(-1,)), + error_regex="cannot sample n_sample <= 0 samples") + + x = torch.empty(2, dtype=torch.double, device=device) + yield ErrorInput(SampleInput(x, args=(3, False,)), + error_regex="cannot sample n_sample > prob_dist") + + x = torch.empty(16777217, dtype=torch.double, device=device) + yield ErrorInput(SampleInput(x, args=(3,)), + error_regex="number of categories cannot exceed") + + inputs = ((1., -1., 1.), (1., inf, 1.), (1., -inf, 1.), (1., 1., nan)) + + err_msg1 = "probability tensor contains either `inf`, `nan` or element < 0" + err_msg2 = "invalid multinomial distribution" + + rep_arg = (False, True) if torch.device(device).type == 'cpu' else (False,) + + if torch.device(device).type == 'cpu': + for rep in rep_arg: + kwargs = {'num_samples': 2, 'replacement': rep} + + for shape in inputs: + # error case when input tensor contains `inf`, `nan` or negative element + yield ErrorInput(SampleInput(torch.tensor(shape), kwargs=kwargs), + error_regex=err_msg1 if rep is False else err_msg2) + + # error case for the invalid multinomial distribution (sum of probabilities <= 0), 1-D input + x = torch.zeros(3, device=device) + yield ErrorInput(SampleInput(x, kwargs=kwargs), + error_regex=err_msg2) + + # error case for the invalid multinomial distribution (sum of probabilities <= 0), 2-D input + x = torch.zeros(3, 3, device=device) + yield ErrorInput(SampleInput(x, 
kwargs=kwargs), + error_regex=err_msg2) + + # error case for the invalid multinomial distribution + x[1, :] = 1 + yield ErrorInput(SampleInput(x, kwargs=kwargs), + error_regex=err_msg2) + +def error_inputs_gradient(op_info, device, **kwargs): + for dtype in [torch.long, torch.float32, torch.complex64]: + t = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], device=device, dtype=dtype) + + dim = (1, 0) + spacing = [0.1] + yield ErrorInput(SampleInput(t, kwargs=dict(spacing=spacing, dim=dim, edge_order=1)), + error_type=RuntimeError, + error_regex='torch.gradient expected spacing to be unspecified, a scalar ') + + yield ErrorInput(SampleInput(t, kwargs=dict(edge_order=3)), + error_type=RuntimeError, + error_regex='torch.gradient only supports edge_order=1 and edge_order=2.') + + dim = (1, 1) + spacing = 0.1 + yield ErrorInput(SampleInput(t, kwargs=dict(spacing=spacing, dim=dim, edge_order=1)), + error_type=RuntimeError, + error_regex='dim 1 appears multiple times in the list of dims') + + dim = (0, 1) + coordinates = [torch.tensor([1, 2, 4], device='cpu'), torch.tensor([1, 2, 4], device='meta')] + yield ErrorInput(SampleInput(t, kwargs=dict(spacing=coordinates, dim=dim, edge_order=1)), + error_type=RuntimeError, + error_regex='torch.gradient expected each tensor to be on the same device,') + + yield ErrorInput(SampleInput(t, kwargs=dict(dim=3)), + error_type=IndexError, error_regex='') + + t = torch.tensor([[1], [2], [3]]) + yield ErrorInput(SampleInput(t, kwargs=dict(edge_order=1)), + error_type=RuntimeError, + error_regex='torch.gradient expected each dimension size to be at least') + + t = torch.tensor([[1, 2], [3, 4]]) + yield ErrorInput(SampleInput(t, kwargs=dict(edge_order=2)), + error_type=RuntimeError, + error_regex='torch.gradient expected each dimension size to be at least') + +def sample_inputs_rrelu(op_info, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_elementwise_unary( + op_info, device, dtype, requires_grad, 
op_kwargs=dict(lower=0., upper=1., training=True)) + + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + yield SampleInput(make_arg(S)) + yield SampleInput(make_arg(S), training=False) + +def error_inputs_rrelu(op_info, device, **kwargs): + input = make_tensor((S, S), device=device, dtype=torch.float32) + yield ErrorInput(SampleInput(input, kwargs={'lower': 0.3, 'upper': 0.1}), + error_regex='Lower bound should be less than or equal to the upper bound') + +def error_inputs_masked_select(op_info, device, **kwargs): + x = torch.rand((1,), device=device).expand((3,)) + y = torch.rand((6,), device=device) + mask = torch.tensor([True, False, True, True, False, False], device=device) + + yield ErrorInput(SampleInput(y, args=(mask,), kwargs=dict(out=x)), + error_type=RuntimeError, + error_regex='unsupported operation') + + yield ErrorInput(SampleInput(y, args=(mask,), kwargs=dict(out=y)), + error_type=RuntimeError, + error_regex='unsupported operation') + + yield ErrorInput(SampleInput(mask.clone(), args=(mask,), kwargs=dict(out=mask)), + error_type=RuntimeError, + error_regex='unsupported operation') + +def error_inputs_median(op_info, device, **kwargs): + x = torch.tensor([[[[[[[[[[[[[[[[[[[[[[[[[nan], + [nan]]]]]]]]]]]]]]]]]]]]]]]]], device=device) + if device == 'cuda': + yield ErrorInput(SampleInput(x, kwargs=dict(dim=(-1))), + error_type=RuntimeError, + error_regex='CUDA Tensors cannot have more than 25 dimensions') + else: + return + + +def error_inputs_index_select(op_info, device, **kwargs): + x = torch.rand((1, 6), device=device).expand((2, 6)) + y = torch.rand((3, 6), device=device) + ind = torch.tensor([0, 1], dtype=torch.int64, device=device) + + yield ErrorInput(SampleInput(y, args=(1, ind,), kwargs=dict(out=x)), + error_type=RuntimeError, + error_regex='unsupported operation') + +def error_inputs_index_add(op_info, device, **kwargs): + result = torch.tensor([[1., 2.], [4., 5.], [7., 8.]]) + source = torch.tensor([2., 
4.]) + + yield ErrorInput(SampleInput(result, args=(0, torch.tensor([0, 2]), source)), + error_type=RuntimeError, + error_regex=r'source tensor shape must match self tensor shape, ' + r'excluding the specified dimension. Got self.shape = \[3, 2\] source.shape = \[2\]') + +def error_inputs_logcumsumexp(op_info, device, **kwargs): + dim = 3 + srcs = [torch.randn(5, 2, device=device), torch.randn(0, 2, device=device)] + for src in srcs: + yield ErrorInput(SampleInput(src, args=(dim,)), + error_type=IndexError, + error_regex='Dimension out of range') + +def sample_inputs_take_along_dim(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, low=None, high=None) + yield SampleInput( + make_arg((S, S)), gather_variable((S, S), 1, S, True, device=device), 0) + + # `indices` broadcast + yield SampleInput( + make_arg((S, S)), gather_variable((1, S // 2), 0, S, True, device=device), 1) + + # `self` broadcast + yield SampleInput( + make_arg((1, S)), gather_variable((S, S // 2), 0, S, True, device=device), 1) + + # without `dim` arg + yield SampleInput( + make_arg((S, S)), gather_variable((S, S // 2), 0, S, True, device=device)) + + +def error_inputs_aminmax_amax_amin(op_info, device, is_ref=False, **kwargs): + + # Error Inputs for zero-dim tensors, when 'dim' arg is not provided. 
+ shape = (S, 0, S) + err_msg_amax_amin = "reduction" + err_msg_aminmax = "cannot compute aminmax over an empty dimension as the operation has no identity" + if op_info.name in ['amax', 'amin', '_refs.amax', '_refs.amin']: + yield ErrorInput(SampleInput(torch.rand(shape, device=device)), error_regex=err_msg_amax_amin) + elif op_info.name in ['aminmax']: + yield ErrorInput(SampleInput(torch.rand(shape, device=device)), error_regex=err_msg_aminmax) + + # Error Inputs for tensors with more than 64 dimension + sizes = [1] * 65 + err_msg1 = "only tensors with up to 64 dims are supported" + yield ErrorInput(SampleInput(torch.randn(sizes, device=device), kwargs={'dim': -1}), + error_regex=err_msg1) + yield ErrorInput(SampleInput(torch.randn(sizes, device=device), kwargs={'dim': 64}), + error_regex=err_msg1) + + # Error Inputs for repeated 'dim' + if op_info.name in ['amax', 'amin', '_refs.amax', '_refs.amin']: + dims = [(0, 0), (0, -4)] + err_msg2 = "in the list of dims" + x = torch.randn(S, S, S, S, device=device) + for dim in dims: + yield ErrorInput(SampleInput(x, kwargs={'dim': dim}), error_regex=err_msg2) + + # Error Input for illegal dtype + input5 = torch.randn(L, L, dtype=torch.float32, device=device) + max_values = torch.empty(L, dtype=torch.float32, device=device) + min_values = torch.empty(L, dtype=torch.double, device=device) + illegal_values = torch.empty(L, dtype=torch.int, device=device) + + # Unlike regular PyTorch, amax and amin refs don't require input and out + # dtypes to match exactly: + # https://github.com/pytorch/pytorch/pull/87765#pullrequestreview-1162023824 + if is_ref: + err_msg_amax_amin2 = ("Attempting to cast from torch.float32 to out tensor with dtype " + "torch.int32, but this can't be cast because it is not safe!") + else: + err_msg_amax_amin2 = ("Expected the dtype for input and out to match, but got Float " + "for input's dtype and Int for out's dtype.") + err_msg_aminmax2 = "Expected out tensor to have dtype float, but got double 
instead" + + if op_info.name in ['amax', 'amin', '_refs.amax', '_refs.amin']: + yield ErrorInput(SampleInput(input5, kwargs={'dim': 0, 'out': illegal_values}), + error_regex=err_msg_amax_amin2) + elif op_info.name in ['aminmax']: + yield ErrorInput(SampleInput(input5, kwargs={'dim': 0, 'out': (max_values, min_values)}), + error_regex=err_msg_aminmax2) + + # Error Inputs for functions to raise an error on specified zero'd dimension as reduction dim + err_msg3 = "reduction" + # FIXME: eager and ref impl throw different types of errors + error_type = IndexError if 'refs' not in op_info.name else RuntimeError + yield ErrorInput(SampleInput(torch.rand(shape, device=device), kwargs={'dim': 1}), + error_type=error_type, error_regex=err_msg3) + +def sample_inputs_aminmax(op_info, device, dtype, requires_grad, **kwargs): + test_cases: Tuple[tuple, dict] = ( # type: ignore[assignment] + ((S, S, S), {}), + ((S, S, S), {'dim': 1}), + ((S, S, S), {'dim': 1, 'keepdim': True}), + ((), {'dim': 0}), + ((), {}), + ((), {'dim': 0, 'keepdim': True}), + ((S, 0, S), {'dim': 0}), + ) + + for shape, kwargs in test_cases: + yield SampleInput( + make_tensor(shape, dtype=dtype, device=device, requires_grad=requires_grad), + **kwargs) + +def error_inputs_diff(op_info, device, **kwargs): + t = torch.rand((1, 3), device=device) + n = -1 + yield ErrorInput(SampleInput(t, args=(n, ), kwargs=kwargs), + error_type=RuntimeError, + error_regex=f'order must be non-negative but got {n}') + +def sample_inputs_diff(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + test_cases = ( + ((1,), 0, None, None), + ((S,), 0, None, None), + ((S, 1), 0, None, None), + ((S, 1), 1, None, None), + ((S, S), 0, None, None), + ((S, S), 1, None, None), + ((S, S), 0, (1, S), (2, S)), + ((S, S), 0, None, (2, S)), + ((XS, XS, XS), 1, None, None), + ((XS, XS, XS), 2, None, None), + ((XS, XS, XS), 1, (XS, 1, XS), (XS, 1, XS)), + ((XS, 
XS, XS), 2, (XS, XS, 1), (XS, XS, 1)), + ((XS, XS, XS), 2, (XS, XS, XS), (XS, XS, XS)),) + + sample_inputs = [] + for size, dim, size_prepend, size_append in test_cases: + prepend_size = 0 if (size_prepend is None) else size_prepend[dim] + append_size = 0 if (size_append is None) else size_append[dim] + dim_size = size[dim] + prepend_size + append_size + for n in range(dim_size): + input_tensor = make_arg(size) + prepend = make_arg(size_prepend) if size_prepend else None + append = make_arg(size_append) if size_append else None + yield SampleInput(input_tensor, n, dim, prepend, append) + + # add some samples with n > dim_size + yield SampleInput(make_arg((XS, XS, XS)), S + 1, 1) + yield SampleInput(make_arg((XS, XS, XS)), S * 3 + 2, 2, make_arg((XS, XS, XS)), make_arg((XS, XS, XS))) + +def sample_inputs_histogram(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + sizes = ((), (S,), (S, S), (S, S, S), (S, 1, S), (S, 0, S)) + + for size, bin_ct, weighted, density in product(sizes, range(1, 5), [False, True], [False, True]): + input_tensor = make_arg(size) + weight_tensor = make_arg(size) if weighted else None + + yield SampleInput(input_tensor, bin_ct, + weight=weight_tensor, density=density) + + bins_tensor = make_arg((bin_ct + 1,)) + sorted_bins, bins_indices = torch.sort(bins_tensor) + yield SampleInput(input_tensor, sorted_bins, + weight=weight_tensor, density=density) + +def sample_inputs_histogramdd(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + sizes = ((S, S), (S, S, S), (S, 1, S), (S, 0, S)) + bin_ct_patterns = ((1, 1, 1, 1, 1), (2, 3, 2, 3, 2), (3, 2, 3, 2, 3)) + + for size, bin_ct_pattern, weighted, density in product(sizes, bin_ct_patterns, [False, True], [False, True]): + input_tensor = make_arg(size) + bin_ct = bin_ct_pattern[:size[-1]] + weight_tensor = 
def sample_inputs_bincount(op_info, device, dtype, requires_grad, **kwargs):
    """Yield SampleInputs for bincount over two sizes, with and without
    weights, sweeping minlength from 0 up past the largest stored value."""
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)

    for length, weighted in product((S, M), [False, True]):
        values = torch.randint(0, length, (length,), dtype=dtype, device=device)
        weights = make_arg((length,)) if weighted else None

        largest = int(values.max().item())

        # minlength below, at, and beyond the largest value present
        for minlength in [0, largest // 2, largest, 2 * largest]:
            yield SampleInput(
                values, weights=weights, minlength=minlength)
def error_inputs_bucketize(opinfo, device, **kwargs):
    """Error inputs for bucketize: the boundaries tensor must be 1-D."""
    float_arg = partial(make_tensor, dtype=torch.float, device=device, requires_grad=False)
    bad_sample = SampleInput(float_arg((S, S, S)), float_arg((S, S)))
    yield ErrorInput(bad_sample,
                     error_regex="boundaries tensor must be 1 dimension")
def sample_inputs_gradient(op_info, device, dtype, requires_grad, **kwargs):
    """Yield SampleInputs for torch.gradient with scalar/list spacing and
    with explicit per-dimension coordinate tensors."""
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, low=None, high=None)

    # (shape, spacing, dim, edge_order) with numeric spacing
    float_cases = (
        ((S,), None, None, 1),
        ((S,), 2., None, 1),
        ((S, S), None, None, 2),
        ((S, S), [2.0, 2.1], None, 1),
        ((S, S), [2.0, 2.1], (0, 1), 1),
        ((4, 4, 4), [2., 1.], (0, 1), 2),
    )
    for shape, spacing, dim, edge_order in float_cases:
        yield SampleInput(make_input(shape), dim=dim, spacing=spacing, edge_order=edge_order)

    # (shape, per-dim coordinate values, dim, edge_order) with tensor spacing
    tensor_cases = (
        ((3, 3, 3), ((1.1, 2.0, 3.5), (4.0, 2, 6.0)), (0, -1), 1),
        ((3, 3, 3), ((1.0, 3.0, 2.0), (8.0, 6.0, 1.0)), (0, 1), 2),
    )
    for shape, coordinates, dim, edge_order in tensor_cases:
        t = make_input(shape)
        # Build each coordinate tensor on-device first and cast afterwards:
        # the values are floats and Python 3.10 no longer allows the implicit
        # conversion to an integer using `__int__`
        # TODO: this can be simplified after https://github.com/pytorch/pytorch/issues/69316 is fixed
        coord_tensors = [torch.tensor(coords, device=device).to(dtype) for coords in coordinates]
        yield SampleInput(t, dim=dim, spacing=coord_tensors, edge_order=edge_order)
def sample_inputs_index_put(op_info, device, dtype, requires_grad, **kwargs):
    """Yield SampleInputs for index_put in its integer-index and boolean-mask
    forms, each with accumulate on and off."""
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)

    for accumulate in (False, True):
        # Integer indices selecting rows of the (S, S) input
        int_indices = (index_variable(2, S, device=device),)
        yield SampleInput(
            make_arg((S, S,)),
            int_indices,
            make_arg((2, S)),
            accumulate=accumulate)

        # Boolean mask; an all-False mask is only exercised when accumulating
        mask = torch.zeros(S, dtype=torch.bool) if accumulate else mask_not_all_zeros((S,))
        yield SampleInput(
            make_arg((S, S)), (mask, ), make_arg((S,)), accumulate=accumulate)
def sample_inputs_threshold(op_info, device, dtype, requires_grad, **kwargs):
    """Yield SampleInputs for threshold; the threshold and value arguments
    must be Python scalars, so they are drawn via .item()."""
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
    for shape in ((), (S,), (S, S), (S, S, S)):
        # keep the tensor draw before the two scalar draws (RNG order)
        sample_tensor = make_arg(shape)
        yield SampleInput(sample_tensor, make_arg(()).item(), make_arg(()).item())
def sample_inputs_adaptive_avg_pool1d(op_info, device, dtype, requires_grad, **kwargs):
    """Yield SampleInputs for adaptive_avg_pool1d in batched and unbatched form."""
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    # (input shape, output size); output size may be an int or a 1-tuple
    cases = (
        ((0, 8, 8), (5,)),
        ((3, 8, 8), 5),
        ((3, 8, 8), 1)
    )

    for full_shape, target_size in cases:
        # batched input, then the same case without the batch dimension
        yield SampleInput(make_arg(full_shape), args=(target_size,))
        yield SampleInput(make_arg(full_shape[1:]), args=(target_size,))
def sample_inputs_adaptive_avg_pool3d(op_info, device, dtype, requires_grad, **kwargs):
    """Yield SampleInputs for adaptive_avg_pool3d in batched and unbatched form;
    None entries in the output size keep that dimension's input size."""
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    # (input shape, output size)
    cases = (
        ((0, 8, 8, 8, 8), (5, 7, 4)),
        ((1, 8, 4, 3, 7), (None, None, None)),
        ((1, 8, 4, 3, 7), (1, 1, 1)),
        ((3, 3, 8, 8, 6), (5, 7, None)),
        ((1, 3, 8, 8, 6), (5, None, 2)),
        ((3, 3, 8, 8, 6), (None, 3, 2)),
    )

    for full_shape, target_size in cases:
        # batched input, then the same case without the batch dimension
        yield SampleInput(make_arg(full_shape), args=(target_size,))
        yield SampleInput(make_arg(full_shape[1:]), args=(target_size,))
def sample_inputs_adaptive_max_pool1d(op_info, device, dtype, requires_grad, **kwargs):
    """Yield SampleInputs for adaptive_max_pool1d, toggling return_indices."""
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    # (input shape, output size)
    cases = (
        # ((0, 8, 8), (5,)),
        # 0 batch size doesn't work, cannot reshape tensor of 0 elements into shape [0, 8, -1]
        ((3, 4, 4), 3),
        ((3, 4, 4), 1)
    )

    for (in_shape, out_size), return_idx in product(cases, (True, False)):
        # batched input, then the same case without the batch dimension
        yield SampleInput(make_arg(in_shape), args=(out_size, return_idx))
        yield SampleInput(make_arg(in_shape[1:]), args=(out_size, return_idx))
def sample_inputs_adaptive_max_pool3d(op_info, device, dtype, requires_grad, **kwargs):
    """Yield SampleInputs for adaptive_max_pool3d, toggling return_indices;
    None entries in the output size keep that dimension's input size."""
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    # (input shape, output size)
    cases = (
        # ((0, 8, 8, 8, 8), (5, 7, 4)),
        # 0 batch size doesn't work, cannot reshape tensor of 0 elements into shape [0, 8, -1]
        ((1, 4, 4, 3, 5), (None, None, None)),
        ((1, 4, 4, 3, 5), (1, 1, 1)),
        ((3, 3, 4, 4, 6), (2, 3, None)),
        ((1, 3, 4, 4, 6), (3, None, 2)),
        ((3, 3, 4, 4, 6), (None, 3, 2)),
    )

    for (in_shape, out_size), return_idx in product(cases, (True, False)):
        # batched input, then the same case without the batch dimension
        yield SampleInput(make_arg(in_shape), args=(out_size, return_idx))
        yield SampleInput(make_arg(in_shape[1:]), args=(out_size, return_idx))
class _TestParamsMaxPoolBase:
    """Cartesian-product generator of max-pool input shapes and kwargs.

    Subclasses extend ``self.kwargs`` and ``self.shapes`` with rank-specific
    tuple variants before ``gen_input_params`` is consumed.
    """

    def __init__(self) -> None:
        # Candidate values for each max_pool keyword argument.
        self.kwargs = {
            'kernel_size': [3],
            'stride': [2, None],
            'ceil_mode': [True, False],
            'padding': [0, 1],
            'dilation': [1],
            'return_indices': [True, False]
        }

        # Per-dimension size candidates.
        self.shapes = [
            [1, 2, None],  # batch
            [2],  # channels
            [3, 6]  # signal
        ]

    def _gen_shape(self):
        """Yield (shape, memory_format) pairs built from the size candidates."""
        for candidate in product(*self.shapes):
            # A leading None marks the unbatched variant.
            shape = candidate[1:] if candidate[0] is None else candidate

            yield shape, torch.contiguous_format
            # only 2d (N, C, H, W) rank 4 tensors support channels_last memory format
            if len(self.shapes) == 4 and len(shape) == 4:
                yield shape, torch.channels_last

    def _gen_kwargs(self):
        """Yield every combination of the kwarg candidates as a dict."""
        names = self.kwargs.keys()
        for combo in product(*self.kwargs.values()):
            yield dict(zip(names, combo))

    def gen_input_params(self):
        """Yield ((shape, memory_format), kwargs) for every combination."""
        yield from product(self._gen_shape(), self._gen_kwargs())
def max_pool2d_backward(*args, kernel_size=(), stride=(), padding=(0,), dilation=(1,), ceil_mode=False, **kwargs):
    """Run max_pool2d forward, then feed an all-ones upstream gradient
    through the aten backward op and return the resulting input gradient."""
    forward_out, argmax = torch.nn.functional.max_pool2d_with_indices(
        *args, kernel_size=kernel_size, stride=stride, padding=padding,
        dilation=dilation, ceil_mode=ceil_mode, return_indices=True)
    upstream = torch.ones_like(forward_out)
    # the aten backward op does not accept stride=None; fall back to kernel_size
    effective_stride = kernel_size if stride is None else stride
    return torch.ops.aten.max_pool2d_with_indices_backward.default(
        upstream, *args, kernel_size, effective_stride, padding, dilation, ceil_mode, argmax)
+ for requires_grad in (True, False): + make_arg = partial(make_tensor, device=device, dtype=torch.float, requires_grad=requires_grad) + # error inputs when pad is negative + x = make_arg((0, 1, 49)) + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 2, 'stride': 50, 'padding': -1, 'return_indices': True}), + error_regex='pad must be non-negative') + + # error inputs when pad > kernel_size / 2 + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 2, 'stride': 50, 'padding': 4, 'return_indices': True}), + error_regex='pad should be at most half of effective kernel size') + + # error inputs when pad > ((kernel_size - 1) * dilation + 1) / 2, when dilation is not default + yield ErrorInput(SampleInput(x, + kwargs={'kernel_size': 3, 'dilation': 2, 'stride': 1, 'padding': 3, 'return_indices': True}), + error_regex='pad should be at most half of effective kernel size') + + # error inputs for input tensor + error_msg = r'Expected 2D or 3D \(batch mode\) tensor with optional 0 dim batch size for input' + yield ErrorInput(SampleInput(make_arg((), requires_grad=requires_grad), kwargs={'kernel_size': 1}), + error_regex=error_msg) + + # error inputs for empty input + yield ErrorInput(SampleInput(torch.tensor([], device=device, requires_grad=requires_grad), + kwargs={'kernel_size': 1}), + error_regex=error_msg) + + # error: unbatched input with 0 sized non-batch dims. + yield ErrorInput(SampleInput(make_arg((0, 10), requires_grad=requires_grad), + kwargs={'kernel_size': 1}), + error_regex=error_msg) + + # error: batched input with 0 sized non-batch dims. 
+ yield ErrorInput(SampleInput(make_arg((1, 10, 0), requires_grad=requires_grad), + kwargs={'kernel_size': 1}), + error_regex=error_msg) + + # error inputs for empty input with stride=0 + error_msg = 'stride must be greater than zero, but got 0' + yield ErrorInput(SampleInput(make_arg((3, 3, 3)), kwargs={'kernel_size': 1, 'stride': 0}), + error_regex=error_msg) + + # error inputs for empty input with dilation=0 + error_msg = 'dilation must be greater than zero, but got 0' + yield ErrorInput(SampleInput(make_arg((3, 3, 3)), + kwargs={'kernel_size': 1, 'stride': 1, 'padding': 0, 'dilation': 0}), + error_regex=error_msg) + + # error inputs for invalid output size + error_msg = 'Invalid computed output size: -2' + yield ErrorInput(SampleInput(make_arg((2, 2, 2)), + kwargs={'kernel_size': 5, 'stride': 1, 'padding': 0, 'dilation': 1}), + error_regex=error_msg) + + # error inputs when kernel_size=0 + error_msg = 'kernel_size must be greater than zero' + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 0}), + error_regex=error_msg) + + # error inputs for strides > 0 + error_msg = 'stride must be greater than zero' + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 2, 'stride': 0}), + error_regex=error_msg) + + +def error_inputs_max_pool2d(op_info, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float, requires_grad=False) + # error inputs when pad is negative + x = make_arg((0, 1, 49)) + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 2, 'stride': 50, 'padding': -1, 'return_indices': True}), + error_regex='pad must be non-negative') + # 2-dimensional kernel + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': (3, 2), 'stride': 50, 'padding': -1, 'return_indices': True}), + error_regex='pad must be non-negative') + + # error inputs when pad > kernel_size / 2 (kernel_size : int) + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 2, 'stride': 50, 'padding': 4, 'return_indices': True}), + error_regex='pad should be 
at most half of effective kernel size') + + # error inputs when pad > kernel_size / 2 (kernel_size : tuple) + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': (3, 2), 'stride': 50, 'padding': 4, 'return_indices': True}), + error_regex='pad should be at most half of effective kernel size') + + # error: unbatched input with 0 sized non-batch dims. + err_msg = r'Expected 3D or 4D \(batch mode\) tensor with optional 0 dim batch size for input' + yield ErrorInput(SampleInput(make_arg((1, 0, 10)), + kwargs={'kernel_size': 1}), + error_regex=err_msg) + + # error: batched input with 0 sized non-batch dims. + yield ErrorInput(SampleInput(make_arg((2, 1, 10, 0)), + kwargs={'kernel_size': 1}), + error_regex=err_msg) + + +def error_inputs_max_pool3d(op_info, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float, requires_grad=False) + # error inputs when pad is negative + x = make_arg((0, 1, 49, 50)) + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 2, 'stride': 50, 'padding': -1, 'return_indices': True}), + error_regex='pad must be non-negative') + # 3-dimensional kernel + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': (3, 2, 2), 'stride': 50, + 'padding': -1, 'return_indices': True}), + error_regex='pad must be non-negative') + + # error inputs when pad > kernel_size / 2 (kernel_size: int) + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 2, 'stride': 50, 'padding': 4, 'return_indices': True}), + error_regex='pad should be at most half of effective kernel size') + + # error inputs when pad > kernel_size / 2 (kernel_size: tuple) + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': (3, 2, 2), 'stride': 50, + 'padding': 4, 'return_indices': True}), + error_regex='pad should be at most half of effective kernel size') + + # error: unbatched input with 0 sized non-batch dims. 
def complex_conv(fn, input_size, weight, grad_output, stride, padding, dilation, groups):
    """Complex convolution expressed via three real convolutions (Gauss trick).

    With W = Wr + i*Wi and x = xr + i*xi:
      a = conv(Wr, xr, br)
      b = conv(Wi, xi, 0)
      c = conv(Wr + Wi, xr + xi, br + bi)
      conv(W, x, b) = (a - b) + i*(c - a - b)
    """
    grad_as_real = torch.view_as_real(grad_output)
    grad_r = grad_as_real[..., 0]
    grad_i = grad_as_real[..., 1]

    weight_as_real = torch.view_as_real(weight)
    w_r = weight_as_real[..., 0]
    w_i = weight_as_real[..., 1]

    a = fn(input_size, w_r, grad_r, stride, padding, dilation, groups)
    b = fn(input_size, w_i, grad_i, stride, padding, dilation, groups)
    c = fn(input_size, w_r + w_i, grad_r + grad_i, stride, padding, dilation, groups)

    return (a - b) + 1j * (c - a - b)
`conv_transpose`. + # To verify the correctness of `conv_transpose`, + # we rely `torch.nn.grad` implementation (which is tested in test_nn.py) + # for floating dtypes. + + assert fn is not None + + grad_fn_map = {torch.nn.functional.conv_transpose1d: torch.nn.grad.conv1d_input, + torch.nn.functional.conv_transpose2d: torch.nn.grad.conv2d_input, + torch.nn.functional.conv_transpose3d: torch.nn.grad.conv3d_input} + batched_dim_map = {torch.nn.functional.conv_transpose1d: 3, + torch.nn.functional.conv_transpose2d: 4, + torch.nn.functional.conv_transpose3d: 5} + + # Input for `ref` is ndarray. + input, weight = torch.from_numpy(input), torch.from_numpy(weight) + + is_batched = len(input.shape) == batched_dim_map[fn] + if not is_batched: + input = input.unsqueeze(0) + + if bias is not None: + bias = torch.from_numpy(bias) + unsqueeze_dims = input.ndim - 2 + for _ in range(unsqueeze_dims): + bias = bias.unsqueeze(1) + + grad_output = input + # Get the input shape for grad_fn. + conv_transpose_output = fn(grad_output.to('meta'), weight.to('meta'), None, + stride=stride, padding=padding, output_padding=output_padding, + groups=groups, dilation=dilation) + input_size = conv_transpose_output.shape + + grad_fn = grad_fn_map[fn] + if weight.dtype.is_complex: + out = complex_conv(grad_fn, input_size, weight, grad_output, stride, padding, dilation, groups) + else: # Floating + out = grad_fn(input_size, weight, grad_output, stride, padding, dilation, groups) + + if bias is not None: + out = out + bias + + return out.squeeze(0) if not is_batched else out + + +def sample_inputs_conv_transpose1d(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Ordered as shapes for input, weight, bias + # and a dict of values of (stride, padding, output_padding, groups, dilation) + cases: Tuple[Tuple[int], Tuple[int], Tuple[int], dict] = ( # type: ignore[assignment] + ((1, 3, 4), (3, 3, 3), (3,), + 
{'stride': (2,), 'padding': 2, 'output_padding': (1,), 'groups': 1}), + ((2, 2, 4), (2, 2, 4), (4,), + {'stride': (3,), 'padding': (1,), 'output_padding': (2,), 'groups': 2, 'dilation': (4,)}), + ((1, 1, 4), (1, 1, 4), (1,), + {'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1, 'dilation': (2,)}), + ((1, 1, 4), (1, 2, 3), None, + {'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1}), + ((1, 4, 5), (4, 8, 3), None, + {}) + ) + + for input_shape, weight, bias, kwargs in cases: + # Batched + yield SampleInput(make_arg(input_shape), args=( + make_arg(weight), + make_arg(bias) if bias is not None else bias + ), kwargs=kwargs) + # Unbatched + yield SampleInput(make_arg(input_shape[1:]), args=( + make_arg(weight), + make_arg(bias) if bias is not None else bias + ), kwargs=kwargs) + + +def sample_inputs_conv_transpose2d(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Ordered as shapes for input, weight, bias + # and a dict of values of (stride, padding, output_padding, groups, dilation) + cases: Tuple[Tuple[int], Tuple[int], Tuple[int], dict] = ( # type: ignore[assignment] + ((1, 3, 4, 4), (3, 3, 3, 3), (3,), + {'stride': (2, 2), 'padding': 2, 'output_padding': (1, 1), 'groups': 1}), + ((2, 2, 4, 4), (2, 2, 4, 5), (4,), + {'stride': (3, 2), 'padding': (1, 2), 'output_padding': (2, 3), 'groups': 2, 'dilation': (4, 4)}), + ((1, 1, 4, 5), (1, 1, 4, 3), (1,), + {'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1, 'dilation': (2, 3)}), + ((1, 1, 4, 3), (1, 2, 3, 4), None, + {'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1}), + ((2, 4, 4, 4), (4, 1, 3, 3), None, {'groups': 4}), + ((1, 2, 5, 5), (2, 4, 3, 3), None, {}) + ) + + for input_shape, weight, bias, kwargs in cases: + # Batched + yield SampleInput(make_arg(input_shape), args=( + make_arg(weight), + make_arg(bias) if bias is not None else bias + ), kwargs=kwargs) + # Unbatched + yield 
SampleInput(make_arg(input_shape[1:]), args=( + make_arg(weight), + make_arg(bias) if bias is not None else bias + ), kwargs=kwargs) + +def sample_inputs_conv_transpose3d(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Ordered as shapes for input, weight, bias + # and a dict of values of (stride, padding, output_padding, groups, dilation) + cases: Tuple[Tuple[int], Tuple[int], Tuple[int], dict] = ( # type: ignore[assignment] + ((1, 3, 4, 4, 4), (3, 3, 3, 3, 3), (3,), + {'stride': (2, 2, 2), 'padding': 2, 'output_padding': (1, 1, 1), 'groups': 1}), + ((2, 2, 4, 4, 4), (2, 2, 4, 5, 6), (4,), + {'stride': (3, 2, 1), 'padding': (1, 2, 3), 'output_padding': (2, 3, 1), 'groups': 2, 'dilation': (4, 4, 4)}), + ((1, 1, 4, 5, 2), (1, 1, 4, 3, 1), (1,), + {'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1, 'dilation': (2, 3, 2)}), + ((1, 1, 4, 3, 4), (1, 2, 3, 4, 5), None, + {'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1}), + ((1, 4, 5, 5, 5), (4, 8, 3, 3, 3), None, + {}) + ) + + for input_shape, weight, bias, kwargs in cases: + # Batched + yield SampleInput(make_arg(input_shape), args=( + make_arg(weight), + make_arg(bias) if bias is not None else bias + ), kwargs=kwargs) + # Unbatched + yield SampleInput(make_arg(input_shape[1:]), args=( + make_arg(weight), + make_arg(bias) if bias is not None else bias + ), kwargs=kwargs) + + +def sample_inputs_conv1d(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Ordered as shapes for input, weight, bias, + # and a dict of values of (stride, padding, dilation, groups) + cases: Tuple = ( + ((1, 3, 4), (3, 3, 3), (3,), {'stride': (2,), 'padding': 2, 'groups': 1}), + ((2, 4, 8), (2, 2, 3), (2,), {'stride': 3, 'padding': 1, 'groups': 2, 'dilation': 2}), + ((1, 4, 5), (1, 4, 3), None, {'stride': (2,), 'padding': 'valid'}), + 
((2, 2, 4), (2, 1, 4), (2,), {'stride': (1,), 'padding': 'same', 'groups': 2, 'dilation': (2,)}), + # With defaults + ((1, 4, 5), (3, 4, 3), None, {}), + ) + + for input_shape, weight, bias, kwargs in cases: + # Batched + yield SampleInput(make_arg(input_shape), args=( + make_arg(weight), + make_arg(bias) if bias is not None else bias + ), kwargs=kwargs) + # Unbatched + yield SampleInput(make_arg(input_shape[1:]), args=( + make_arg(weight), + make_arg(bias) if bias is not None else bias + ), kwargs=kwargs) + + +def error_inputs_conv1d(opinfo, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float64) + make_int_arg = partial(make_tensor, device=device, dtype=torch.int64) + make_complex_arg = partial(make_tensor, device=device, dtype=torch.complex128) + + # error inputs for different dtypes of input tensor and bias + yield ErrorInput( + SampleInput(make_int_arg((1, 1, 4)), args=(make_int_arg((1, 1, 2)), make_arg((1,)))), + error_regex="should be the same") + + # error inputs for different dtypes of input tensor and bias + yield ErrorInput( + SampleInput(make_arg((1, 1, 4)), args=(make_arg((1, 1, 2)), make_complex_arg((1,)))), + error_regex="should be the same") + + # error inputs for negative strides + yield ErrorInput( + SampleInput(make_arg((1, 1, 4)), args=(make_arg((1, 2, 2)), make_arg((1,))), + kwargs={'stride': (-1,)}), error_regex="non-positive stride is not supported") + + # error inputs for negative padding + yield ErrorInput( + SampleInput(make_arg((1, 1, 4)), args=(make_arg((1, 2, 2)), make_arg((1,))), + kwargs={'padding': (-1,)}), error_regex="negative padding is not supported") + + # error inputs for negative dilation + yield ErrorInput( + SampleInput(make_arg((1, 1, 4)), args=(make_arg((1, 1, 2)), make_arg((1,))), + kwargs={'dilation': (-1,)}), error_regex="dilation should be greater than zero") + + # FIXME: https://github.com/pytorch/pytorch/issues/85656 + # error inputs for bias shape not equal to the output channels + # 
yield ErrorInput(SampleInput(make_arg((1, 1, 4)), args=(make_arg((1, 1, 3)), make_arg((2,)))), + # error_regex="expected bias to be 1-dimensional with 1 elements") + + # error inputs for input.ndim != weight.ndim + yield ErrorInput(SampleInput(make_arg((1, 1, 4)), args=(make_arg((1, 2)), make_arg((1,)))), + error_regex="weight should have at least three dimensions") + + # error inputs for the weight[0] are less than the number of groups + yield ErrorInput( + SampleInput(make_arg((2, 2, 4)), args=(make_arg((2, 2, 2)), make_arg((2,))), + kwargs={'padding': 'same', 'groups': 3}), error_regex="expected weight to be at least 3 at dimension 0") + + # error inputs for the weight[0] are less than the number of groups + yield ErrorInput( + SampleInput(make_arg((2, 2, 4)), args=(make_arg((2, 2, 2)), make_arg((2,))), + kwargs={'groups': 3}), error_regex="expected weight to be at least 3 at dimension 0") + + # error inputs for invalid groups + yield ErrorInput( + SampleInput(make_arg((2, 2, 4)), args=(make_arg((2, 2, 2)), make_arg((2,))), + kwargs={'padding': 'same', 'groups': -1}), error_regex="non-positive groups is not supported") + + # error inputs for invalid groups + yield ErrorInput( + SampleInput(make_arg((2, 2, 4)), args=(make_arg((2, 2, 2)), make_arg((2,))), + kwargs={'padding': 'same', 'groups': 0}), error_regex="non-positive groups is not supported") + + +def error_inputs_conv2d(opinfo, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float64) + make_int_arg = partial(make_tensor, device=device, dtype=torch.int64) + make_complex_arg = partial(make_tensor, device=device, dtype=torch.complex128) + + # error inputs for different dtypes of input tensor and bias + yield ErrorInput( + SampleInput(make_int_arg((2, 4, 4)), args=(make_int_arg((3, 2, 3, 3)), make_arg((3,)))), + error_regex="should be the same") + + # error inputs for different dtypes of input tensor and bias + yield ErrorInput( + SampleInput(make_arg((2, 4, 4)), 
args=(make_arg((3, 2, 3, 3)), make_complex_arg((3,)))), + error_regex="should be the same") + + # error inputs for negative strides + yield ErrorInput( + SampleInput(make_arg((1, 1, 4, 4)), args=(make_arg((1, 2, 2, 3)), make_arg((1,))), + kwargs={'stride': (-1,)}), error_regex="non-positive stride is not supported") + + # error inputs for negative padding + yield ErrorInput( + SampleInput(make_arg((1, 1, 4, 3)), args=(make_arg((1, 2, 2, 4)), make_arg((1,))), + kwargs={'padding': (-1,)}), error_regex="negative padding is not supported") + + # error inputs for negative dilation + yield ErrorInput( + SampleInput(make_arg((1, 1, 4, 2)), args=(make_arg((1, 1, 2, 5)), make_arg((1,))), + kwargs={'dilation': (-1,)}), error_regex="dilation should be greater than zero") + + # FIXME: https://github.com/pytorch/pytorch/issues/85656 + # error inputs for bias shape not equal to the output channels + # yield ErrorInput(SampleInput(make_arg((1, 1, 4, 4)), args=(make_arg((1, 1, 3, 2)), make_arg((2,)))), + # error_regex="expected bias to be 1-dimensional with 1 elements") + + # error inputs for input.ndim != weight.ndim + yield ErrorInput( + SampleInput(make_arg((1, 1, 4, 3)), args=(make_arg((1, 2, 2)), make_arg((1,))), + kwargs={'padding': 'same'}), error_regex="Expected 3-dimensional input for 3-dimensional weight") + + # error inputs for the weight[0] are less than the number of groups + yield ErrorInput( + SampleInput(make_arg((2, 2, 4, 3)), args=(make_arg((2, 2, 1, 3)), make_arg((2,))), + kwargs={'groups': 3}), error_regex="expected weight to be at least 3 at dimension 0") + + # error inputs for groups the weight[0] are less than the number of groups + yield ErrorInput( + SampleInput(make_arg((2, 2, 4, 3)), args=(make_arg((2, 2, 1, 3)), make_arg((2,))), + kwargs={'padding': 'same', 'groups': 3}), error_regex="expected weight to be at least 3 at dimension 0") + + # error inputs for invalid groups + yield ErrorInput( + SampleInput(make_arg((2, 2, 4, 5)), args=(make_arg((2, 2, 1, 
4)), make_arg((2,))), + kwargs={'padding': 'same', 'groups': -1}), error_regex="non-positive groups is not supported") + + # error inputs for invalid groups + yield ErrorInput( + SampleInput(make_arg((2, 2, 4, 3)), args=(make_arg((2, 2, 4, 3)), make_arg((2,))), + kwargs={'padding': 'same', 'groups': 0}), error_regex="non-positive groups is not supported") + + +def sample_inputs_conv2d(op_info, device, dtype, requires_grad, jit_fail_sample=False, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Ordered as shapes for input, weight, bias + # and a dict of values of (stride, padding, groups, dilation) + cases: Tuple = ( + ((1, 3, 4, 4), (3, 3, 3, 3), (3,), + {'stride': (2, 2), 'padding': 2, 'groups': 1}), + ((2, 4, 8, 8), (2, 2, 3, 3), (2,), + {'stride': (3, 2), 'padding': (2, 1), 'groups': 2, 'dilation': (4, 4)}), + ((1, 4, 5, 5), (1, 4, 2, 3), (1,), + {'stride': 2, 'padding': 1, 'groups': 1, 'dilation': (2, 3)}), + ((1, 4, 5, 5), (1, 4, 2, 3), (1,), + {'stride': 2, 'padding': 1, 'groups': 1, 'dilation': (2, 3)}), + ((1, 2, 4, 3), (4, 2, 3, 4), None, + {'stride': 2, 'padding': 1, 'groups': 1}), + ((1, 4, 5, 5), (1, 4, 2, 3), (1,), + {'stride': 2, 'padding': "valid"}), + ((1, 4, 5, 5), (1, 4, 2, 3), (1,), + {'stride': 1, 'padding': "same", 'dilation': 3}), + # Below are the group related samples from common_nn.py + ((2, 4, 6, 6), (4, 1, 3, 3), (4,), {'groups': 4}), + ((2, 4, 6, 6), (8, 1, 3, 3), (8,), {'groups': 4}), + ((2, 4, 6, 6), (8, 1, 3, 3), None, {'groups': 4}), + ((2, 4, 6, 6), (4, 1, 3, 3), (4,), {'groups': 4, 'stride': (3, 2)}), + ((2, 4, 6, 6), (4, 1, 3, 3), (4,), {'groups': 4, 'padding': (1, 1)}), + ((2, 4, 5, 5), (4, 1, 2, 2), (4,), {'groups': 4, 'dilation': (2, 2)}), + ((2, 4, 6, 5), (6, 2, 3, 2), (6,), {'groups': 2}), + # With defaults + ((1, 4, 5, 5), (3, 4, 3, 3), None, {}), + ) + + for input_shape, weight, bias, kwargs in cases: + # Batched + yield SampleInput(make_arg(input_shape), args=( + 
make_arg(weight), + make_arg(bias) if bias is not None else bias + ), kwargs=kwargs) + # Unbatched + yield SampleInput(make_arg(input_shape[1:]), args=( + make_arg(weight), + make_arg(bias) if bias is not None else bias + ), kwargs=kwargs) + + +def sample_inputs_conv3d(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Ordered as shapes for input, weight, bias + # and dict of values of (stride, padding, dilation, groups) + cases: Tuple = ( + ((1, 1, 4, 4, 4), (1, 1, 1, 1, 1), (1,), {'padding': 'same'}), + ((1, 1, 4, 4, 4), (1, 1, 4, 4, 4), (1,), {'stride': (2, 2, 2)}), + ((1, 1, 5, 5, 5), (1, 1, 3, 3, 3), (1,), {'dilation': 2}), + ((1, 1, 1, 1, 10), (1, 1, 1, 1, 4), None, {'padding': 'valid'}), + ((1, 1, 10, 11, 12), (1, 1, 1, 2, 5), None, {'padding': 'same'}), + ((1, 1, 10, 11, 12), (1, 1, 1, 2, 5), None, {'padding': 'same', 'dilation': 2}), + ((1, 1, 10, 11, 12), (1, 1, 4, 4, 4), None, {'padding': 'same', 'dilation': 3}), + ((1, 1, 1, 1, 10), (1, 1, 1, 1, 4), None, {'padding': 'valid'}), + ((3, 9, 3, 1, 9), (3, 3, 3, 1, 9), (3,), {'groups': 3}), + ((3, 9, 3, 1, 9), (3, 3, 3, 1, 9), (3,), {'stride': (2, 2, 2), 'dilation': 1, 'groups': 3}), + ) + + for input_shape, weight, bias, kwargs in cases: + # Batched + yield SampleInput(make_arg(input_shape), args=( + make_arg(weight), + make_arg(bias) if bias is not None else bias + ), kwargs=kwargs) + # Unbatched + yield SampleInput(make_arg(input_shape[1:]), args=( + make_arg(weight), + make_arg(bias) if bias is not None else bias + ), kwargs=kwargs) + + +def error_inputs_conv3d(opinfo, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float64) + make_int_arg = partial(make_tensor, device=device, dtype=torch.int64) + make_complex_arg = partial(make_tensor, device=device, dtype=torch.complex128) + + # error inputs for different dtypes of input tensor and bias + yield ErrorInput( + 
SampleInput(make_int_arg((1, 1, 4, 4, 4)), args=(make_int_arg((1, 1, 2, 2, 2)), make_arg((1,)))), + error_regex="should be the same") + + # error inputs for different dtypes of input tensor and bias + yield ErrorInput( + SampleInput(make_arg((1, 1, 4, 4, 4)), args=(make_arg((1, 1, 2, 2, 2)), make_complex_arg((1,)))), + error_regex="should be the same") + + # error inputs for negative strides + yield ErrorInput( + SampleInput(make_arg((1, 1, 4, 4, 4)), args=(make_arg((1, 1, 2, 2, 2)), make_arg((1,))), + kwargs={'stride': (-1,)}), error_regex="non-positive stride is not supported") + + # error inputs for negative padding + yield ErrorInput( + SampleInput(make_arg((1, 1, 4, 4, 4)), args=(make_arg((1, 1, 2, 2, 2)), make_arg((1,))), + kwargs={'padding': (-1,)}), error_regex="negative padding is not supported") + + # error inputs for negative dilation + yield ErrorInput( + SampleInput(make_arg((1, 1, 4, 4, 4)), args=(make_arg((1, 1, 2, 2, 2)), make_arg((1,))), + kwargs={'dilation': (-1,)}), error_regex="dilation should be greater than zero") + + # FIXME: https://github.com/pytorch/pytorch/issues/85656 + # error inputs for bias shape not equal to the output channels + # yield ErrorInput(SampleInput(make_arg((1, 1, 4, 4, 4)), args=(make_arg((1, 1, 3, 3, 3)), make_arg((2,)))), + # error_regex="expected bias to be 1-dimensional with 1 elements") + + # error inputs for input.ndim != weight.ndim + yield ErrorInput( + SampleInput(make_arg((1, 1, 3, 4, 5)), args=(make_arg((1, 1, 4, 3)), make_arg((1,))), + kwargs={'padding': 'same'}), error_regex="Expected 4-dimensional input for 4-dimensional weight") + + # error inputs for the weight[0] are less than the number of groups + yield ErrorInput( + SampleInput(make_arg((2, 2, 3, 4, 5)), args=(make_arg((2, 2, 4, 3, 3)), + make_arg((2,))), kwargs={'groups': 3}), + error_regex="expected weight to be at least 3 at dimension 0") + + # error inputs for the weight[0] are less than the number of groups + yield ErrorInput( + 
SampleInput(make_arg((2, 2, 3, 4, 5)), args=(make_arg((2, 2, 4, 3, 3)), + make_arg((2,))), kwargs={'padding': 'same', 'groups': 3}), + error_regex="expected weight to be at least 3 at dimension 0") + + # error inputs for invalid groups + yield ErrorInput( + SampleInput(make_arg((2, 2, 3, 4, 5)), args=(make_arg((2, 2, 4, 3, 3)), + make_arg((2,))), kwargs={'padding': 'same', 'groups': 0}), + error_regex="non-positive groups is not supported") + + # error inputs for padding='same' not supported by strided convolutions + yield ErrorInput( + SampleInput(make_arg((18, 27, 9, 1, 9)), args=(make_arg((9, 9, 9, 1, 9)), + make_arg((9,))), kwargs={'stride': 2, 'padding': 'same', 'groups': 3}), + error_regex="padding='same' is not supported for strided convolutions") + + +def sample_inputs_group_norm(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Ordered as input shape, num groups, and kwargs for eps + cases: Tuple[Tuple[int], int, float] = ( # type: ignore[assignment] + ((1, 6, 3), 2, {'eps' : 0.5}), + ((2, 6, 3), 2, {'eps' : -0.5}), + ((1, 3), 1, {'eps' : 1e-5}), + ((0, 2), 1, {'eps' : 1e-5}), + ((S, S, S), 1, {'eps' : 0.5}), + ) + + # num_channels is inferred to be input.shape[1] dimension + for input_shape, num_groups, kwargs in cases: + # Shape of weight and bias should be the same as num_channels + channels = input_shape[1] if len(input_shape) > 1 else 0 + weight_tensor = make_arg(channels) + bias_tensor = make_arg(channels) + + # Checking for permutations of weights and biases as `None` + weights = [weight_tensor, None] + biases = [bias_tensor, None] + for weight, bias in itertools.product(weights, biases): + kwargs = { + 'weight': weight, + 'bias': bias, + **kwargs + } + yield SampleInput(make_arg(input_shape), num_groups, **kwargs) + + # Without any optional args + yield SampleInput(make_arg((1, 2)), args=(1,)) + +def reference_inputs_group_norm(op_info, device, dtype, 
requires_grad, **kwargs): + yield from sample_inputs_group_norm( + op_info, device, dtype, requires_grad, **kwargs) + + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Ordered as input shape, num groups, and kwargs for eps + cases: Tuple[Tuple[int], int, float] = ( # type: ignore[assignment] + ((20, 6, 10, 10), 3, {'eps' : 1e-5}), + # equivalent with InstanceNorm + # GroupNorm(C, num_groups=C) == InstanceNorm(num_features=C) + ((20, 6, 10, 10), 6, {'eps' : 1e-5}), + # equivalent with LayerNorm + # GroupNorm(C, num_groups=1, affine=False) == LayerNorm(normalized_shape=[C, H, W], elementwise_affine=False) + ((20, 6, 10, 10), 1, {'eps' : 1e-5}), + ) + + # num_channels is inferred to be input.shape[1] dimension + for input_shape, num_groups, kwargs in cases: + # Shape of weight and bias should be the same as num_channels + channels = input_shape[1] if len(input_shape) > 1 else 0 + input_tensor = make_arg(input_shape) + weight_tensor = make_arg(channels) + bias_tensor = make_arg(channels) + + # Checking for permutations of weights and biases as `None` + weights = [weight_tensor, None] + biases = [bias_tensor, None] + for weight, bias in itertools.product(weights, biases): + kwargs = { + 'weight': weight, + 'bias': bias, + **kwargs + } + yield SampleInput(input_tensor, num_groups, **kwargs) + + +def sample_inputs_instance_norm(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + make_arg_without_requires_grad = partial(make_tensor, device=device, dtype=dtype, requires_grad=False) + + # Ordered as: input shape, kwargs for momentum, eps + cases: Tuple[Tuple[int], dict] = ( # type: ignore[assignment] + ((S, S, S), {'momentum': 0.5, 'eps': 0.6}), + ((S, S, S), {'momentum': 0.5, 'eps': 0.6, 'use_input_stats': True}), + ((3, 2, 4), {'momentum': -1.2}), + ((3, 2, 4), {'momentum': 0.0}), + ((3, 2, 3, 4), {'momentum': -1.0, 'eps': 0.5}), + ((3, 
2, 3, 4), {'momentum': -1.0, 'eps': 0.5}), + ) + + for input_shape, kwargs in cases: + # args: running mean, running var, weight and bias should necessarily be of shape: (channels,) + channels = input_shape[1] + weight = make_arg(channels) + bias = make_arg(channels) + running_mean = make_arg_without_requires_grad(channels, low=0) + running_var = make_arg_without_requires_grad(channels, low=0) + new_kwargs = { + 'running_mean': running_mean, + 'running_var': running_var, + 'weight': weight, + 'bias': bias, + **kwargs + } + + yield SampleInput( + make_arg(input_shape), + args=(), + kwargs=new_kwargs + ) + + # Checking for permutations of weights and biases as `None` + # instance_norm assumes that if there's a bias, there's a weight + weights = [channels, None] + biases = [None, None] + + for weight_channels, bias_channels in zip(weights, biases): + running_mean = make_arg_without_requires_grad(channels, low=0) + running_var = make_arg_without_requires_grad(channels, low=0) + yield SampleInput( + make_arg(input_shape), + args=(), + kwargs={ + 'running_mean': running_mean, + 'running_var': running_var, + 'weight': make_arg(weight_channels) if weight_channels is not None else None, + 'bias': make_arg(bias_channels) if bias_channels is not None else None + } + ) + + # Test case for no optional kwargs + yield SampleInput(make_arg((1, 2, 3)), kwargs={}) + +def sample_inputs_safe_softmax(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=False) + + def make_bool_mask(*shape): + return torch.randint(0, 2, shape, device=device, dtype=torch.bool) + + def mask_two_rows(rows, cols): + mask_two_rows = torch.ones((rows, cols), dtype=torch.bool, device=device) + mask_two_rows[rows - 1] = False + mask_two_rows[rows - 3] = False + return mask_two_rows + + def convert_to_float_mask(mask: torch.Tensor) -> torch.Tensor: + return torch.where(~mask, float('-inf'), 0.0) + + def with_requires_grad(tensor): + return 
tensor.requires_grad_(requires_grad) + + def generate_input_from_mask(mask_shape, dim): + mask = make_bool_mask(*mask_shape) + input_tensor = make_arg(mask_shape) + masked_input = input_tensor + convert_to_float_mask(mask) + return SampleInput(with_requires_grad(masked_input), kwargs={'dim': dim}) + + samples = [ + # Basic 3D tensor with mask + generate_input_from_mask((2, 3, 4), dim=1), + # 2D tensor with mask, testing different dim + generate_input_from_mask((5, 5), dim=0), + # 4D tensor, testing with a different dim + generate_input_from_mask((2, 3, 4, 5), dim=2), + # Edge case: 1D tensor + generate_input_from_mask((10,), dim=0), + # Edge case: tensor with one dimension of size 1 + generate_input_from_mask((1, 5, 5), dim=1), + # Testing with all elements masked + SampleInput( + with_requires_grad( + make_arg((3, 3)) + + convert_to_float_mask( + torch.zeros((3, 3), dtype=torch.bool, device=device) + ) + ), + kwargs={"dim": 1}, + ), + # Testing with no elements masked + SampleInput( + with_requires_grad( + make_arg((3, 3)) + + convert_to_float_mask( + torch.ones((3, 3), dtype=torch.bool, device=device) + ) + ), + kwargs={"dim": 1}, + ), + # Testing with two rows masked + SampleInput( + with_requires_grad( + make_arg((6, 3)) + convert_to_float_mask(mask_two_rows(6, 3)) + ), + kwargs={"dim": 1}, + ), + ] + yield from samples + +def sample_inputs_layer_norm(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Ordered as input shape, normalized_shape and a kwarg dict for eps + cases: Tuple[Tuple[int], Tuple[int], dict] = ( # type: ignore[assignment] + ((1, 2, 3), (1, 2, 3), {'eps': 0.5}), + ((2, 2, 3), (2, 3), {'eps': -0.5}), + ((1,), (1,), {}), + ((1, 2), (2,), {}), + ((0, 1), (1,), {}), + ) + + for input_shape, normalized_shape, kwargs in cases: + # Shape of weight and bias should be the same as normalized_shape + weight = make_arg(normalized_shape) + bias = 
make_arg(normalized_shape) + yield SampleInput( + make_arg(input_shape), + args=(normalized_shape, weight, bias), + kwargs=kwargs + ) + # Without any optional args + yield SampleInput(make_arg((1, 2)), args=((2,),)) + + # TODO: @krshrimali, once to_numpy method in SampleInput class is modified to take None inputs, + # enable these inputs; see https://github.com/pytorch/pytorch/pull/63276#discussion_r691950400 + + # With weight and a `None` bias + # yield SampleInput(make_arg((1, 2)), args=((2,), make_arg((2,)), None)) + + # With `None` weight and bias (tests failing for this, see the link above) + # yield SampleInput(make_arg((1, 2)), args=((2,), None, make_arg((2,)))) + + +def sample_inputs_native_layer_norm(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Ordered as input shape, normalized_shape, eps + cases: Tuple[Tuple[int], Tuple[int], float] = ( # type: ignore[assignment] + ((1, 2, 3), (1, 2, 3), 0.5), + ((2, 2, 3), (2, 3), -0.5), + ((1,), (1,), 1e-5), + ((1, 2), (2,), 1e-5), + ((0, 1), (1,), 1e-5), + ) + + for input_shape, normalized_shape, eps in cases: + # Shape of weight and bias should be the same as normalized_shape + weight = make_arg(normalized_shape) + bias = make_arg(normalized_shape) + yield SampleInput( + make_arg(input_shape), + args=(normalized_shape, weight, bias, eps), + ) + yield SampleInput( + make_arg(input_shape), + args=(normalized_shape, None, bias, eps), + ) + yield SampleInput( + make_arg(input_shape), + args=(normalized_shape, weight, None, eps), + ) + yield SampleInput( + make_arg(input_shape), + args=(normalized_shape, None, None, eps), + ) + +def sample_inputs_rms_norm(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Ordered as input shape, normalized_shape and a kwarg dict for eps + cases: Tuple[Tuple[int], Tuple[int], dict] = ( # type: 
ignore[assignment] + ((1, 2, 3), (1, 2, 3), {'eps': 0.5}), + ((2, 2, 3), (2, 3), {'eps': -0.5}), + ((1,), (1,), {}), + ((1, 2), (2,), {}), + ((0, 1), (1,), {}), + ) + + for input_shape, normalized_shape, kwargs in cases: + # Shape of weight and bias should be the same as normalized_shape + weight = make_arg(normalized_shape) + yield SampleInput( + make_arg(input_shape), + args=(normalized_shape, weight), + kwargs=kwargs + ) + # Without any optional args + yield SampleInput(make_arg((1, 2)), args=((2,),)) + +def error_inputs_group_norm(opinfo, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float32, requires_grad=False) + + # check that input has minimum number of dimensions + err_msg1 = "Expected at least 2 dimensions for input tensor but received" + s1 = SampleInput(make_arg(1), args=(1,)) + yield ErrorInput(s1, error_regex=err_msg1) + + # check that the channels dimension is compatible with number of groups + err_msg2 = "Expected number of channels in input to be divisible by num_groups, but got input of shape" + s2 = SampleInput(make_arg((2, 7, 4)), args=(2,)) + yield ErrorInput(s2, error_regex=err_msg2) + +def error_inputs_native_layer_norm(opinfo, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float32, requires_grad=False) + input_shape = (1, 2, 3) + + err_msg1 = "Expected normalized_shape to be at least 1-dimensional" + s1 = SampleInput( + make_arg(input_shape), args=((), None, None, 1e-5) + ) + yield ErrorInput(s1, error_regex=err_msg1) + + normalized_shape = (1, 2, 3) + weight = make_arg((1, 2)) + err_msg2 = "Expected weight to be of same shape as normalized_shape" + s2 = SampleInput( + make_arg(input_shape), args=(normalized_shape, weight, None, 1e-5) + ) + yield ErrorInput(s2, error_regex=err_msg2) + + bias = make_arg((1, 2)) + err_msg3 = "Expected bias to be of same shape as normalized_shape" + s3 = SampleInput( + make_arg(input_shape), args=(normalized_shape, None, bias, 1e-5) + ) + yield 
ErrorInput(s3, error_regex=err_msg3) + + err_msg4 = "Given normalized_shape=" + s4 = SampleInput( + make_arg((2, 2, 3)), args=((2, 2), None, None, 1e-5) + ) + yield ErrorInput(s4, error_regex=err_msg4) + +def error_inputs_rms_norm(opinfo, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float32, requires_grad=False) + input_shape = (1, 2, 3) + + err_msg1 = "Expected normalized_shape to be at least 1-dimensional" + s1 = SampleInput( + make_arg(input_shape), args=((), None, 1e-5) + ) + yield ErrorInput(s1, error_regex=err_msg1) + + normalized_shape = (1, 2, 3) + weight = make_arg((1, 2)) + err_msg2 = "Expected weight to be of same shape as normalized_shape" + s2 = SampleInput( + make_arg(input_shape), args=(normalized_shape, weight, 1e-5) + ) + yield ErrorInput(s2, error_regex=err_msg2) + + + err_msg4 = "Given normalized_shape=" + s4 = SampleInput( + make_arg((2, 2, 3)), args=((2, 2), None, 1e-5) + ) + yield ErrorInput(s4, error_regex=err_msg4) + + +def sample_inputs_local_response_norm(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Ordered as input shape, size and a kwarg dict for alpha, beta, and k + cases: Tuple[Tuple[int], Tuple[int], dict] = ( # type: ignore[assignment] + ((1, 6, 3), 2, {'alpha': 3e-05, 'beta': 0.5, 'k': 1.25}), + ((1, 6, 3), 2, {'beta': 0.5, 'k': 1.25}), + ((1, 6, 3), 2, {'alpha': 3e-05, 'k': 1.25}), + ((1, 6, 3), 2, {'alpha': 3e-05, 'beta': 0.5}), + ((1, 6, 3), 2, {'alpha': 3e-05}), + ((1, 6, 3), 2, {'beta': 0.5}), + ((1, 6, 3), 2, {'k': 1.25}), + ((1, 6, 3), 2, {}), + ((2, 6, 3), 2, {'alpha': 3e-05, 'beta': 0.5, 'k': 1.25}), + ((1, 1, 2), 1, {'alpha': 3e-05, 'beta': 0.5, 'k': 1.25}), + ((0, 1, 2), 1, {'alpha': 3e-05, 'beta': 0.5, 'k': 1.25}), + ) + + for input_shape, size, kwargs in cases: + yield SampleInput(make_arg(input_shape), args=(size,), kwargs=kwargs) + +def sample_inputs_hardswish(self, device, dtype, 
requires_grad, **kwargs): + N = 5 + # make sure we are testing -3 -> 3 range. default is -10 -> 10 so maybe unnecessary ? + make_arg = partial(make_tensor, device=device, dtype=dtype, + requires_grad=requires_grad, low=-5, high=5) + return (SampleInput(make_arg((N * 2, N * 2))) for _ in range(1, N)) + +def sample_inputs_linear(self, device, dtype, requires_grad, **kwargs): + features_options = [[3, 4], [8, 8]] + batch_options: List[List[int]] = [ + [], # no batch + [0], + [8], + [2, 3], + ] + create_tensor = partial(make_tensor, device=device, dtype=dtype, + requires_grad=requires_grad, low=-2, high=2) + + for has_bias, (in_feat, out_feat), batch_shape in \ + itertools.product([True, False], features_options, batch_options): + input_tensor = create_tensor(batch_shape + [in_feat]) + weight = create_tensor([out_feat, in_feat]) + if not has_bias: + yield SampleInput(input_tensor, weight) + continue + + bias = create_tensor([out_feat]) + yield SampleInput(input_tensor, weight, bias) + + # 5D tensor, used to crash on MPS, see https://github.com/pytorch/pytorch/issues/114942 + yield SampleInput(create_tensor(2, 1, 2, 1, 2), create_tensor(4, 2)) + yield SampleInput(create_tensor(2, 1, 2, 1, 2), create_tensor(4, 2), create_tensor(4)) + +def sample_inputs_bilinear(self, device, dtype, requires_grad, **kwargs): + features_options = [[3, 4, 5], [8, 8, 8]] + batch_options: List[List[int]] = [ + [], # no batch + [0], + [8], + [2, 3], + ] + create_tensor = partial(make_tensor, device=device, dtype=dtype, + requires_grad=requires_grad, low=-2, high=2) + + for has_bias, (in_feat1, in_feat2, out_feat), batch_shape in \ + itertools.product([True, False], features_options, batch_options): + input_tensor1 = create_tensor(batch_shape + [in_feat1]) + input_tensor2 = create_tensor(batch_shape + [in_feat2]) + weight = create_tensor([out_feat, in_feat1, in_feat2]) + if not has_bias: + yield SampleInput(input_tensor1, input_tensor2, weight) + continue + bias = create_tensor([out_feat]) + 
yield SampleInput(input_tensor1, input_tensor2, weight, bias) + +def sample_inputs_glu(self, device, dtype, requires_grad, **kwargs): + features_options = [[2], [2, 4], [8, 8], [3, 6, 8], [1, 4, 6, 7]] + batch_options: List[List[int]] = [ + [], # no batch + [0], + [8], + [2, 3], + ] + create_tensor = partial(make_tensor, device=device, dtype=dtype, + requires_grad=requires_grad, low=-2, high=2) + + for features, batch_shape in itertools.product(features_options, batch_options): + ndim = len(features) + len(batch_shape) + for dim in range(ndim): + input_tensor = create_tensor(batch_shape + features) + dim_size = input_tensor.size(dim) + if dim_size > 0 and dim_size % 2 == 0: + yield SampleInput(input_tensor, dim) + +def sample_inputs_interpolate(mode, self, device, dtype, requires_grad, **kwargs): + N, C = 2, 3 + D = 4 + S = 3 + L = 5 + + align_corners_options: Tuple[Any, ...] = (None,) + if mode in ('linear', 'bilinear', 'bicubic', 'trilinear'): + align_corners_options = (True, False, None) + ranks_for_mode = { + 'nearest': [1, 2, 3], + 'nearest-exact': [1, 2, 3], + 'linear': [1], + 'bilinear': [2], + 'bicubic': [2], + 'trilinear': [3], + 'area': [1, 2, 3] + } + + def shape(size, rank, with_batch_channel=True): + if with_batch_channel: + return tuple([N, C] + ([size] * rank)) + return tuple([size] * rank) + + if mode in ('bilinear', 'bicubic') and dtype == torch.uint8: + make_arg = partial( + make_tensor, + device=device, + dtype=dtype, + requires_grad=requires_grad, + # we pick more realistic upper bound 256 instead of default 10 for uint8 dtype + high=256 if dtype == torch.uint8 else None, + ) + # provide few samples for a more close to typical image processing usage + rank = 2 + for memory_format in [torch.contiguous_format, torch.channels_last]: + yield SampleInput( + make_arg(shape(270, rank), memory_format=memory_format), + shape(130, rank, False), + scale_factor=None, + mode=mode, + align_corners=False, + ) + + make_arg = partial(make_tensor, device=device, 
dtype=dtype, requires_grad=requires_grad) + + for align_corners in align_corners_options: + for rank in ranks_for_mode[mode]: + yield SampleInput( + make_arg(shape(D, rank)), + shape(S, rank, False), + scale_factor=None, + mode=mode, + align_corners=align_corners, + ) + yield SampleInput( + make_arg(shape(D, rank)), + shape(L, rank, False), + scale_factor=None, + mode=mode, + align_corners=align_corners, + ) + for recompute_scale_factor in [False, True]: + for scale_factor in [1.7, 0.6]: + yield SampleInput( + make_arg(shape(D, rank)), + size=None, + scale_factor=scale_factor, + mode=mode, + align_corners=align_corners, + recompute_scale_factor=recompute_scale_factor, + ) + +def reference_inputs_interpolate(mode, self, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_interpolate(mode, self, device, dtype, requires_grad, **kwargs) + + if mode in ('bilinear', 'bicubic'): + make_arg = partial( + make_tensor, + device=device, + dtype=dtype, + requires_grad=requires_grad, + # we pick more realistic upper bound 256 instead of default 10 for uint8 dtype + high=256 if dtype == torch.uint8 else None, + ) + # provide few samples for more typical image processing usage + for memory_format in [torch.contiguous_format, torch.channels_last]: + for aa in [True, False]: + yield SampleInput( + make_arg((2, 3, 345, 456), memory_format=memory_format), + (270, 270), + scale_factor=None, + mode=mode, + align_corners=False, + antialias=aa, + ) + +def sample_inputs_upsample(mode, self, device, dtype, requires_grad, **kwargs): + N, C = 2, 3 + D = 4 + S = 3 + L = 5 + + ranks_for_mode = { + 'nearest': [1, 2, 3], + 'bilinear': [2], + } + + def shape(size, rank, with_batch_channel=True): + if with_batch_channel: + return torch.Size([N, C] + ([size] * rank)) + return torch.Size([size] * rank) + + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + for rank in ranks_for_mode[mode]: + yield SampleInput(make_arg(shape(D, rank)), 
size=shape(S, rank, False)) + yield SampleInput(make_arg(shape(D, rank)), size=shape(L, rank, False)) + yield SampleInput(make_arg(shape(D, rank)), scale_factor=1.7) + yield SampleInput(make_arg(shape(D, rank)), scale_factor=0.6) + +def reference_inputs_upsample(mode, self, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_upsample(mode, self, device, dtype, requires_grad, **kwargs) + + if mode in ('bilinear', ): + make_arg = partial( + make_tensor, + device=device, + dtype=dtype, + requires_grad=requires_grad, + # we pick more realistic upper bound 256 instead of default 10 for uint8 dtype + high=256 if dtype == torch.uint8 else None, + ) + # provide a single sample for more typical image processing usage + for memory_format in [torch.contiguous_format, torch.channels_last]: + yield SampleInput( + make_arg((2, 3, 345, 456), memory_format=memory_format), + (270, 270), + ) + +def sample_inputs_upsample_aa(mode, self, device, dtype, requires_grad, **kwargs): + N = 6 + C = 3 + H = 10 + W = 20 + S = 3 + L = 5 + + input_tensor = make_tensor(torch.Size([N, C, H, W]), device=device, dtype=dtype, requires_grad=requires_grad) + + yield SampleInput(input_tensor, output_size=torch.Size([S, S]), align_corners=False, scale_factors=None) + yield SampleInput(input_tensor, output_size=torch.Size([L, L]), align_corners=False, scale_factors=None) + yield SampleInput(input_tensor, output_size=None, align_corners=False, scale_factors=[1.7, 0.9]) + yield SampleInput(input_tensor, output_size=None, align_corners=True, scale_factors=[0.8, 1.0]) + + yield SampleInput(input_tensor, output_size=torch.Size([S, S]), align_corners=False, scales_h=None, scales_w=None) + yield SampleInput(input_tensor, output_size=torch.Size([S, S]), align_corners=False, scales_h=1.7, scales_w=0.9) + yield SampleInput(input_tensor, output_size=torch.Size([S, S]), align_corners=True, scales_h=1.7, scales_w=0.9) + +def sample_inputs_gelu(self, device, dtype, requires_grad, **kwargs): + N = 5 + 
for _ in range(1, N): + for approximate in ['none', 'tanh']: + yield SampleInput( + make_tensor((N * 2, N * 2), device=device, dtype=dtype, + requires_grad=requires_grad, low=-3, high=3), + approximate=approximate) + + +def error_inputs_gelu(op, device, **kwargs): + # Tests that gelu errors out when passed an approximation we don't know. + yield ErrorInput(SampleInput(make_tensor((), dtype=torch.float, device=device), kwargs={"approximate": "asdf"}), + error_regex="approximate argument must be either") + + +def sample_inputs_max_min_reduction_with_dim(op_info, device, dtype, requires_grad, **kwargs): + inputs = [] + args_for_reduction_with_dim = ( + ((S, S, S), (1,),), + ((S, S, S), (1, True, ),), + ((), (0,),), + ((), (0, True,),), + ) + return ((SampleInput(make_tensor(input_tensor, dtype=dtype, device=device, + low=None, high=None, + requires_grad=requires_grad), + *args)) + for input_tensor, args in args_for_reduction_with_dim) + +def sample_inputs_max_min_reduction_no_dim(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, low=None, high=None) + yield SampleInput(make_arg((S, S, S))) + yield SampleInput(make_arg(())) + +def _generate_nan_reduction_inputs(device, dtype, requires_grad, **kwargs): + yield from _generate_reduction_inputs(device, dtype, requires_grad) + # NaN only exists for floating point numbers + if dtype.is_complex or dtype.is_floating_point: + yield torch.tensor([2, torch.nan, -1], device=device, dtype=dtype, requires_grad=requires_grad) + yield torch.tensor([[torch.nan, 2], [0, 1]], device=device, dtype=dtype, requires_grad=requires_grad) + +def sample_inputs_nan_reduction(supports_multiple_dims): + # Generates sample inputs for reduction ops that contain the input tensor + # and dim and keepdim kwargs. 
If a reduction op needs to test additional + # args/kwargs then create a separate sample_inputs function + def fn(op_info, device, dtype, requires_grad, **kwargs): + for t in _generate_nan_reduction_inputs(device, dtype, requires_grad): + # Add case without dim and keepdim kwargs + yield SampleInput(t.clone().requires_grad_(requires_grad)) + for kwargs in _generate_reduction_kwargs(t.ndim, supports_multiple_dims): + yield SampleInput(t.clone().requires_grad_(requires_grad), **kwargs) + + return fn + +def sample_inputs_reduction_quantile(op_info, device, dtype, requires_grad, **kwargs): + test_quantiles = (0.5, make_tensor((2,), dtype=dtype, device=device, low=0, high=1, requires_grad=requires_grad)) + test_interpolations = ['linear', 'midpoint'] + + for quantiles in test_quantiles: + for t in _generate_reduction_inputs(device, dtype, requires_grad): + # Add case without dim and keepdim kwargs + input = t.clone().requires_grad_(requires_grad) + yield SampleInput(input, quantiles) + for kwargs in _generate_reduction_kwargs(t.ndim, supports_multiple_dims=False): + # Interpolation kwarg for now is only supported when providing both dim and keepdim + kwargs.setdefault('dim', 0) + kwargs.setdefault('keepdim', False) + for interpolation in test_interpolations: + kwargs['interpolation'] = interpolation + input = t.clone().requires_grad_(requires_grad) + yield SampleInput(input, quantiles, **kwargs) + +def sample_inputs_reduction_count_nonzero(*args, **kwargs): + """Sample inputs for count_nonzero""" + # count_nonzero does not support keepdim yet + for sample in sample_inputs_reduction(*args, **kwargs): + sample.kwargs.pop('keepdim', None) + yield sample + +def sample_inputs_leaky_relu(op_info, device, dtype, requires_grad, **kwargs): + N = 10 + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + return (SampleInput(make_arg((N, N))) for _ in range(1, N)) + +def sample_inputs_fractional_max_pool2d(op_info, device, dtype, requires_grad, 
**kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Order: input_shape, kernel_size + cases = (((1, 3, 9, 9), 3), + ((1, 3, 9, 9), (4, 4)), + ((1, 3, 9, 9), (6, 6)), + ((2, 3, 9, 9), (3, 3)), + ((1, 1, 4, 4), (2, 2)), + ((1, 2, 6, 6), (4, 4))) + + for input_shape, kernel_size in cases: + for return_indices in [False, True]: + # test case passing a single output size + yield SampleInput( + make_arg(input_shape), + kernel_size, + output_size=2, + return_indices=return_indices, + ) + + # test case passing a tuple output size + yield SampleInput( + make_arg(input_shape), + kernel_size, + output_size=(2, 3), + return_indices=return_indices, + ) + + # test case passing an output ratio + yield SampleInput( + make_arg(input_shape), + kernel_size, + output_ratio=(0.5, 0.5), + return_indices=return_indices, + ) + +def sample_inputs_fractional_max_pool3d(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Order: input_shape, kernel_size + cases = (((2, 3, 5, 5, 5), (2, 2, 2)), + ((1, 2, 6, 5, 4), 2), + ((1, 2, 5, 6, 5), (2, 3, 2)), + ((1, 2, 6, 6, 6), (2, 3, 2)), + ((1, 1, 7, 6, 7), (2, 3, 4)), + ((1, 1, 4, 5, 4), (2, 2, 1)), + ((1, 1, 8, 7, 6), (4, 3, 2)), + ((0, 1, 4, 5, 4), (2, 2, 1))) + + for input_shape, kernel_size in cases: + for return_indices in [False, True]: + # test case passing a single output size + yield SampleInput( + make_arg(input_shape), + kernel_size, + output_size=2, + return_indices=return_indices, + ) + + # test case passing a tuple output size + yield SampleInput( + make_arg(input_shape), + kernel_size, + output_size=(2, 3, 2), + return_indices=return_indices, + ) + + # test case passing an output ratio + yield SampleInput( + make_arg(input_shape), + kernel_size, + output_ratio=(0.5, 0.5, 0.5), + return_indices=return_indices, + ) + +def sample_inputs_avgpool2d(op_info, device, dtype, requires_grad, 
**kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Order: input_shape, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override + cases = (((1, 3, 9, 9), 3, 1, 1, True, False, 2), + ((1, 3, 9, 9), (4, 4), (2, 3), 1, True, False, 2), + ((1, 3, 9, 9), (6, 6), (3, 3), (2, 3), True, True, 2), + ((2, 3, 9, 9), (3, 3), (1, 1), (1, ), True, False, 2), + ((1, 1, 4, 4), (2, 2), (), (0, ), False, True, -2), + ((1, 2, 6, 6), (4, 4), (2, 2), (2, ), True, True, None)) + + for input_shape, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override in cases: + yield SampleInput(make_arg(input_shape), + args=(kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override)) + # Case with just input_shape and kernel_size + yield SampleInput(make_arg((1, 3, 9, 9)), args=((3, 3))) + +def sample_inputs_avgpool1d(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Order: input_shape, kernel_size, kwargs + cases: List[Tuple[Tuple[int, ...], Union[int, Tuple[int, ...]], Dict]] = [ + ((2, 3, 9), (3,), {}), + ((1, 3, 9), 3, dict(stride=1, padding=1, ceil_mode=True, count_include_pad=False)), + ((1, 3, 9), (6,), dict(stride=(3,), padding=(2,), ceil_mode=True, count_include_pad=True)), + ((2, 3, 9), (3,), dict(stride=(1,), padding=(1,), ceil_mode=False, count_include_pad=True)), + ((0, 3, 9), (6,), dict(stride=(3,), padding=(2,), ceil_mode=False, count_include_pad=True)), + ((1, 2, 9), (7,), dict(stride=(3,), padding=(2,), ceil_mode=False)), + ((1, 2, 9), (7,), dict(stride=(3,), padding=(3,), ceil_mode=True)), + ((1, 2, 9), (7,), dict(stride=(3,), ceil_mode=False)), + ((1, 2, 9), (7,), dict(stride=(3,), ceil_mode=True)), + ] + + for input_shape, kernel_size, kwargs in cases: + yield SampleInput(make_arg(input_shape), args=(kernel_size,), kwargs=kwargs) + +def sample_inputs_avgpool3d(op_info, 
device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Order: input_shape, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override + cases: List[Tuple[Tuple[int, ...], Union[int, Tuple[int, ...]], Dict]] = [ + ((2, 3, 3, 4, 4), (2, 2, 2), {}), + ((1, 2, 4, 4, 4), 2, dict(stride=1, padding=1, ceil_mode=True, + count_include_pad=False, divisor_override=2)), + ((1, 2, 5, 5, 5), (2, 3, 4), dict(stride=(1, 2, 2), padding=(0, 1, 2), ceil_mode=True, + count_include_pad=True, divisor_override=2)), + ((1, 2, 5, 5, 5), (2, 3, 4), dict(stride=(1, 2, 2), padding=(0, 1, 2), ceil_mode=False)), + ((1, 1, 7, 5, 7), (6, 3, 4), dict(stride=(2, 3, 2), padding=(3, 1, 0), ceil_mode=False, + count_include_pad=False, divisor_override=2)), + ((1, 1, 4, 5, 4), (2, 2, 3), dict(stride=(2, 2, 1), padding=0, ceil_mode=False, + count_include_pad=True, divisor_override=-2)), + ((1, 1, 6, 5, 6), (4, 5, 6), dict(stride=(2, 3, 2), padding=2, ceil_mode=True, + count_include_pad=True, divisor_override=None)), + ((0, 1, 4, 5, 4), (2, 3, 1), dict(stride=(2, 1, 2), padding=0, ceil_mode=False, + count_include_pad=True, divisor_override=None)), + ] + + for input_shape, kernel_size, kwargs in cases: + yield SampleInput(make_arg(input_shape), args=(kernel_size,), kwargs=kwargs) + +def error_inputs_avg_pool1d(op_info, device, **kwargs): + # error inputs when pad is negative + x = torch.rand([0, 1, 49], dtype=torch.float32) + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 2, 'stride': 50, 'padding': -1}), + error_regex='pad must be non-negative') + + # error inputs when pad > kernel_size / 2 + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 2, 'stride': 50, 'padding': 4}), + error_regex='pad should be at most half of effective kernel size') + +def error_inputs_avg_pool2d(op_info, device, **kwargs): + # error inputs when pad is negative + x = torch.rand([0, 1, 49], dtype=torch.float32) + yield 
ErrorInput(SampleInput(x, kwargs={'kernel_size': 2, 'stride': 50, 'padding': -1}), + error_regex='pad must be non-negative') + # 2-dimensional kernel + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': (3, 2), 'stride': 50, 'padding': -1}), + error_regex='pad must be non-negative') + + # error inputs when pad > kernel_size / 2 + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 2, 'stride': 50, 'padding': 4}), + error_regex='pad should be at most half of effective kernel size') + # 2-dimensional kernel + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': (3, 2), 'stride': 50, 'padding': 4}), + error_regex='pad should be at most half of effective kernel size') + + # error inputs for zero divisor + x = torch.zeros(3, 3, 3) + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': (2, 2), 'divisor_override': 0}), + error_regex='divisor must be not zero') + +def error_inputs_avg_pool3d(op_info, device, **kwargs): + # error inputs when pad is negative + x = torch.rand([0, 1, 49, 50], dtype=torch.float32) + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 2, 'stride': 50, 'padding': -1}), + error_regex='pad must be non-negative') + # 3-dimensional kernel + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': (3, 2, 2), 'stride': 50, 'padding': -1}), + error_regex='pad must be non-negative') + + # error inputs when pad > kernel_size / 2 + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 2, 'stride': 50, 'padding': 4}), + error_regex='pad should be at most half of effective kernel size') + # 3-dimensional kernel + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': (3, 2, 2), 'stride': 50, 'padding': 4}), + error_regex='pad should be at most half of effective kernel size') + + # error inputs for zero divisor + x = torch.zeros(3, 3, 3, 3) + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': (2, 2, 2), 'divisor_override': 0}), + error_regex='divisor must be not zero') + + # error inputs for invalid input dimension + x = torch.rand([0, 1, 
49], dtype=torch.float32) + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 2, 'stride': 50, 'padding': 0}), + error_regex='non-empty 4D or 5D') + + +def sample_inputs_to(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + # test_multiple_devices_to_cuda would fail if we use a different device than given + devices = [device] + if torch.device(device).type == 'cpu': + devices = [torch.device('cpu'), torch.device('cuda:0')] if torch.cuda.is_available() else devices + memory_formats = [torch.preserve_format, torch.channels_last] + + # TODO: can't switch `to.device` overload to use positional arguments + # https://github.com/pytorch/pytorch/issues/84265 + # to.device overload + for device, nb, cp, mem_f in product(devices, [True, False], [True, False], memory_formats): + kwargs = { + "memory_format": mem_f, + } + yield SampleInput(make_arg((S, S, S, S)), args=(device, torch.float64, nb, cp), kwargs=kwargs) + + # to.dtype overload + for nb, cp, mem_f in product([True, False], [True, False], memory_formats): + kwargs = { + "memory_format": mem_f, + } + yield SampleInput(make_arg((S, S, S, S)), args=(torch.float64, nb, cp), kwargs=kwargs) + + # to.other overload + for device, nb, cp, mem_f in product(devices, [True, False], [True, False], memory_formats): + kwargs = { + "memory_format": mem_f, + } + other = make_arg((S, S, S, S), dtype=torch.float64, device=device) + yield SampleInput(make_arg((S, S, S, S)), args=(other, nb, cp), kwargs=kwargs) + + +def sample_inputs_topk(op_info, device, dtype, requires_grad, **kwargs): + def get_tensor_input(size): + return make_tensor(size, dtype=dtype, device=device, requires_grad=requires_grad) + + yield SampleInput(get_tensor_input((S, M, S)), 3) + yield SampleInput(get_tensor_input((S, M, S)), 3, 1) + yield SampleInput(get_tensor_input((S, M, S)), 3, -2) + yield SampleInput(get_tensor_input((S, M, S)), 3, 1, True) + yield 
SampleInput(get_tensor_input((S, M, S)), 3, -2, True) + yield SampleInput(get_tensor_input((S, M, S)), 3, 1, True, True) + yield SampleInput(get_tensor_input((S, M, S)), 3, -2, True, True) + + yield SampleInput(get_tensor_input(()), 1) + yield SampleInput(get_tensor_input(()), 1, 0) + yield SampleInput(get_tensor_input(()), 1, -1) + yield SampleInput(get_tensor_input(()), 1, 0, True) + yield SampleInput(get_tensor_input(()), 1, -1, True) + yield SampleInput(get_tensor_input(()), 1, 0, True, True) + yield SampleInput(get_tensor_input(()), 1, -1, True, True) + +def sample_inputs_outer(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + yield SampleInput(make_arg(S), make_arg(M)) + +def sample_inputs_dist(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + sizes = ((S, S, S), (S,), (S, 1, S), (), (S, S)) + ps = (2, 4) + + for size_x, size_y, p in product(sizes, sizes, ps): + yield SampleInput(make_arg(size_x), args=(make_arg(size_y), p)) + +# Missing to test the nondeterminism of the operation +# https://github.com/pytorch/pytorch/issues/53352 +def sample_inputs_index(op_info, device, dtype, requires_grad, reference=False, **kwargs): + # target.index_select(dim, idx) + select = "index_select" in op_info.name + # target.index_add(dim, idx, source, *, alpha=1) + add = "index_add" in op_info.name + # target.index_copy(dim, idx, source) + copy = "index_copy" in op_info.name + # target.index_fill(dim, idx, value) + fill = "index_fill" in op_info.name + + # Extended reference inputs. 
We generate that exercise atomic adds / writing + # several times to one location + if reference: + make_arg = partial(torch.ones, device=device, dtype=dtype, requires_grad=requires_grad) + make_idx = partial(torch.zeros, device=device, dtype=torch.int64) + else: + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + # idx They need to be different for copy and add to be deterministic + if copy or add: + make_idx = partial(torch.randperm, device=device, dtype=torch.int64) + else: + def make_idx(n): + return make_tensor((n,), device=device, dtype=torch.int64, low=0, high=n) + + shapes = [(), (1,), (S, S)] + # extra parameter for add + if add: + if dtype == torch.bool: + alphas = (True, False) + else: + alphas = (-1, 0, 2) + else: + alphas = (None,) + + if fill: + # A weird number to catch errors. + # The former one tests `index_fill.int_Scalar`, and the latter one tests `index_fill.int_Tensor`. + values = (make_arg((1,)).item(), make_arg(())) + else: + values = (None,) + + for shape, alpha, value in product(shapes, alphas, values): + t = make_arg(shape) + args = [] + + # dim. 
We handle the scalar case + dim = -1 if t.ndim == 2 else 0 + args.append(dim) + + idx = make_idx(t.shape[dim] if t.ndim != 0 else 1) + args.append(idx) + + # source + if copy or add: + args.append(make_arg(shape)) + elif fill: + args.append(value) + + args = tuple(args) + kwargs = {} if alpha is None else {"alpha": alpha} + + yield SampleInput(t, args=args, kwargs=kwargs) + +def sample_inputs_index_reduce(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + def make_idx(n, m): + return make_tensor((n,), device=device, dtype=torch.int64, low=0, high=m) + + shapes = [((), ()), ((1,), (1,)), ((S, S), (S, M)), ((S, S, S), (S, M, S))] + include_selfs = (True, False) + reduce = op_info.variant_test_name + assert reduce in ('prod', 'mean', 'amin', 'amax') + + for shape, include_self in product(shapes, include_selfs): + self_shape, src_shape = shape + # dim. We handle the scalar case + dim = 1 if len(self_shape) >= 2 else 0 + idx = make_idx(src_shape[dim] if len(src_shape) != 0 else 1, + self_shape[dim] if len(self_shape) != 0 else 1) + args = (dim, idx, make_arg(src_shape), reduce) + yield SampleInput(make_arg(self_shape), + args=args, + kwargs={'include_self' : include_self}) + + # Sample inputs to test edge cases for backward + if requires_grad and reduce == 'prod': + # Check that gradients are propagated correctly for prod when zeros in self/src are reduced + # This sample tests gradients for the following cases + # (a) 1 zero reduced (from source (self[0, 1]), from self (self[0, 0])) + # (b) 2 zeros reduced (1 from src and 1 from self (self[1, 0], self[1, 1]) + # (c) no zeros reduced (self[2, 1], self[2, 2]) + # (d) 2 zeros reduced (both from src) is tested in test/test_autograd.py + # test_scatter_index_reduce_prod_gradgrad_error as this case is not supported for gradgrad + input = torch.tensor([[0, 13], [0, 0], [15, 19]], dtype=dtype, device=device, 
requires_grad=requires_grad) + src = torch.tensor([[2, 0], [0, 0], [2, 3], [2, 2]], dtype=dtype, device=device, requires_grad=requires_grad) + idx = torch.tensor([0, 1, 2, 0], dtype=torch.long, device=device) + + yield SampleInput(input, + args=(0, idx, src, reduce), + kwargs={'include_self': True}) + +def sample_inputs__unsafe_masked_index(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + def make_idx(n, m, dim, d): + view_shape = [1] * dim + view_shape[d] = n + return make_tensor((n,), device=device, dtype=torch.int64, low=0, high=m).view(view_shape) + + cases = [ + ((S, S), S, M), + ((S, S), M, S), + ((S, S, S), S, M), + ] + + fill_value = make_tensor([], dtype=dtype, device="cpu").item() + + for c in cases: + self_shape, high, idx_size = c + dim = len(self_shape) + indices = [make_idx(idx_size, high, dim, d) for d in range(dim)] + masks = [torch.logical_and(idx >= 0, idx < self_shape[i]) for i, idx in enumerate(indices) if idx is not None] + mask = functools.reduce(torch.logical_and, masks) + yield SampleInput(make_arg(self_shape), mask, indices, fill_value) + + masks = [torch.logical_and(idx >= 1, idx < self_shape[i] - 1) for i, idx in enumerate(indices) if idx is not None] + mask = functools.reduce(torch.logical_and, masks) + yield SampleInput(make_arg(self_shape), mask, indices, fill_value) + +def sample_inputs__unsafe_masked_index_put_accumulate(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + def make_idx(n, m, dim, d): + view_shape = [1] * dim + view_shape[d] = n + return make_tensor((n,), device=device, dtype=torch.int64, low=0, high=m).view(view_shape) + + cases = [ + ((S, S), S, (M, M)), + ((S, S), M, (S, S + 1)), + ((S, S, S), S, (M, M - 1, M + 1)), + ] + + fill_value = make_tensor([], dtype=dtype, device="cpu").item() + + for c in cases: + self_shape, high, 
idx_sizes = c + dim = len(self_shape) + indices = [make_idx(idx_sizes[d], high, dim, d) for d in range(dim)] + masks = [torch.logical_and(idx >= 0, idx < self_shape[i]) for i, idx in enumerate(indices) if idx is not None] + mask = functools.reduce(torch.logical_and, masks) + values = make_arg(idx_sizes) + yield SampleInput(make_arg(self_shape), mask, indices, values) + + masks = [torch.logical_and(idx >= 1, idx < self_shape[i] - 1) for i, idx in enumerate(indices) if idx is not None] + mask = functools.reduce(torch.logical_and, masks) + yield SampleInput(make_arg(self_shape), mask, indices, values) + + +def sample_inputs_mode(op_info, device, dtype, requires_grad, **kwargs): + args = ( + ((S, S, S), (),), + ((S, S, S), (1, ),), + ((S, S, S), (1, True, ),), + ((), (),), + ((), (0,),), + ((), (0, True,),), + # Non-fused mode kernel on CUDA + ((3000,), ()), + ) + make_arg = partial(make_tensor, dtype=dtype, device=device, + requires_grad=requires_grad, low=None, high=None) + return (SampleInput(make_arg(input_tensor), *args) + for input_tensor, args in args) + +# Missing to test the nondeterminism of the operation +# https://github.com/pytorch/pytorch/issues/53352 +def sample_inputs_put(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + make_idx = partial(make_tensor, low=0, dtype=torch.int64, device=device, requires_grad=False) + + S = 3 + + # Generic inputs + idx = torch.randperm(S * S, device=device, dtype=torch.int64)[:S] + idx_list = [idx, -idx - 1] + for idx, acc in product(idx_list, (True, False)): + yield SampleInput(input=make_arg((S, S)), + args=(idx.clone(), + make_arg((S,)), + acc)) + + # Scalar cases + scalar_sizes = [(), (1,)] + tgt_gen = (make_arg(size) for size in scalar_sizes) + idx_gen = (make_idx(size, high=1) for size in scalar_sizes) + src_gen = (make_arg(size) for size in scalar_sizes) + for tgt, idx, src, acc in product(tgt_gen, idx_gen, src_gen, (True, 
False)): + yield SampleInput(input=tgt.clone().requires_grad_(requires_grad), + args=(idx.clone(), + src.clone().requires_grad_(requires_grad), + acc)) + + # Empty cases + tgt_sizes = [(0,), (), (1,), (3, 2)] + tgt_gen = (make_arg(size) for size in tgt_sizes) + idx = make_idx((0,), high=1) + src = make_arg((0,)) + for tgt, acc in product(tgt_gen, (True, False)): + yield SampleInput(input=tgt.clone().requires_grad_(requires_grad), + args=(idx.clone(), + src.clone().requires_grad_(requires_grad), + acc)) + +def sample_inputs_take(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + make_idx = partial(make_tensor, low=0, dtype=torch.int64, device=device, requires_grad=False) + + S = 3 + + # Generic inputs: take S elements out of S * S + index = make_idx((S,), high=(S * S)) + for idx in (index, -index - 1): + yield SampleInput(input=make_arg((S, S)), args=(idx,)) + + # Scalar cases + scalar_sizes = [(), (1,)] + src_gen = (make_arg(size) for size in scalar_sizes) + idx_gen = (make_idx(size, high=1) for size in scalar_sizes) + for src, idx in product(src_gen, idx_gen): + yield SampleInput(input=src.clone().requires_grad_(requires_grad), + args=(idx.clone(),)) + + # Empty cases + src_sizes = [(0,), (), (1,), (3, 2)] + src_gen = (make_arg(size) for size in src_sizes) + + idx = make_idx((0,), high=1) + for src in src_gen: + yield SampleInput(input=src.clone().requires_grad_(requires_grad), + args=(idx.clone(),)) + +def sample_movedim_moveaxis(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad) + yield SampleInput(make_arg((4, 3, 2, 1)), [0, 1, 2, 3], [3, 2, 1, 0]) + yield SampleInput(make_arg((4, 3, 2, 1)), [0, -1, -2, -3], [-3, -2, -1, -0]) + +def reference_movedim_moveaxis(op_info, device, dtype, requires_grad, **kwargs): + yield from sample_movedim_moveaxis(op_info, device, 
dtype, requires_grad, **kwargs) + + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # shape, source, destination + args = ( + # empty inputs + ((), (), ()), + # int inputs, negative + ((3, 5, 7, 2), -2, 1), + # swap bounds + ((3, 5, 7, 2), (-1, 0), (0, -1)), + # non-sequential, negative + ((2, 3, 4, 5, 6), (3, -3, 4), (1, 0, -1)), + # idempotence, negative + ((2, 3, 4, 5, 6), (-3, 4, 3, 1), (-3, 4, 3, 1)), + # reverse, sequential, positive + ((6, 2, 3, 5, 4), (4, 3, 2, 1, 0), (0, 1, 2, 3, 4)), + # reverse, non-sequential + ((6, 2, 3, 5, 4), (-3, -2, -4, -5, -1), (2, 1, 3, 4, 0)), + # reverse, sequential, negative + ((6, 2, 3, 5, 4), (4, -2, 2, -4, -5), (-5, 1, 2, -2, -1)), + ) + + for shape, source, destination in args: + yield SampleInput(make_arg(shape), args=(source, destination)) + +def error_movedim_moveaxis(op_info, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float32) + + # source length < destination length + yield ErrorInput( + SampleInput(make_arg(2, 3, 4, 5, 6), args=((3, -3), (1, 0, -1))), + error_regex=(r"movedim: Invalid source or destination dims: source " + r"\(\[3, -3\] dims\) should contain the same number of " + r"dims as destination \(\[1, 0, -1\] dims\)"), + ) + + # source length > destination length + yield ErrorInput( + SampleInput(make_arg(2, 3, 4, 5, 6), args=((3, -3, 4), (1, 0))), + error_regex=(r"movedim: Invalid source or destination dims: source " + r"\(\[3, -3, 4\] dims\) should contain the same number of " + r"dims as destination \(\[1, 0\] dims\)"), + ) + + # repeated source dim, with negative indices + yield ErrorInput( + SampleInput(make_arg(2, 3, 4, 5, 6), args=((0, 4, -5), (1, 0, 2))), + error_regex=r"movedim: repeated dim in `source` \(\[0, 4, -5\]\)", + ) + + # repeated destination dim, with negative indices + yield ErrorInput( + SampleInput(make_arg(2, 3, 4, 5, 6), args=((1, 0, 2), (0, 4, -5))), + error_regex=r"movedim: repeated dim in 
`destination` \(\[0, 4, -5\]\)", + ) + + # repeated dim (both), with negative indices + yield ErrorInput( + SampleInput(make_arg(2, 3, 4, 5, 6), args=((1, 0, -4), (0, 4, -5))), + error_regex=r"movedim: repeated dim in `source` \(\[1, 0, -4\]\)", + ) + + # out of bounds source inputs, with negative indices + yield ErrorInput( + SampleInput(make_arg(2, 3, 4, 5, 6), args=((0, 1, -6), (1, 4, 2))), + error_regex=r"Dimension out of range \(expected to be in range of \[-5, 4\], but got -6\)", + error_type=IndexError, + ) + + # out of bounds destination inputs, with negative indices + yield ErrorInput( + SampleInput(make_arg(2, 3, 4, 5, 6), args=((1, 4, 2), (0, 1, -6))), + error_regex=r"Dimension out of range \(expected to be in range of \[-5, 4\], but got -6\)", + error_type=IndexError, + ) + + # out of bounds source input, int + yield ErrorInput( + SampleInput(make_arg(2, 3, 4, 5, 6), args=(-6, 1)), + error_regex=r"Dimension out of range \(expected to be in range of \[-5, 4\], but got -6\)", + error_type=IndexError, + ) + + # out of bounds destination input, int + yield ErrorInput( + SampleInput(make_arg(2, 3, 4, 5, 6), args=(3, -6)), + error_regex=r"Dimension out of range \(expected to be in range of \[-5, 4\], but got -6\)", + error_type=IndexError, + ) + +def sample_repeat_tile(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + rep_dims = ((), (0, ), (1, ), (0, 2), (1, 1), (2, 3), (2, 3, 2), (0, 2, 3), (2, 1, 1, 1),) + shapes = ((), (0,), (2,), (3, 0), (3, 2), (3, 0, 1)) + + if requires_grad: + # Tests for variant_consistency_jit, grad, gradgrad + # are slower. Use smaller bags of `rep_dims` and `shapes` + # in this case. 
+ rep_dims = ((), (0, ), (0, 2), (1, 1), (2, 3), (1, 3, 2), (3, 1, 1)) # type: ignore[assignment] + shapes = ((), (0,), (2,), (3, 2)) # type: ignore[assignment] + + is_repeat_op = op_info.name in ['repeat', '_refs.repeat'] + for rep_dim, shape in product(rep_dims, shapes): + # `torch.repeat` errors for `len(rep_dims) < t.dim()`, + # so we filter such combinations. + if is_repeat_op and len(rep_dim) < len(shape): + continue + yield SampleInput(make_arg(shape), rep_dim) + + +def sample_inputs_narrow_narrow_copy(op_info, device, dtype, requires_grad, *, is_narrow, **kwargs): + shapes_and_args = ( + ((S, S, S), 1, 2, 2), + ((S, S, S), -1, 2, 2), + ((S, S, S), 1, 0, 0), + ((S, S, S), -1, 0, 0), + ((S, S, S), 2, 1, 2), + ) + + for shape, dim, start, length in shapes_and_args: + tensor = make_tensor(shape, dtype=dtype, device=device, low=None, high=None, + requires_grad=requires_grad) + yield SampleInput(tensor, dim, start, length) + # narrow also accepts the start argument being a Tensor + if is_narrow: + yield SampleInput(tensor, dim, torch.tensor(start), length) + +def reference_inputs_narrow_narrow_copy(op_info, device, dtype, requires_grad, *, is_narrow, **kwargs): + yield from sample_inputs_narrow_narrow_copy(op_info, device, dtype, requires_grad, is_narrow=is_narrow, **kwargs) + + shapes_and_args = ( + # 1-dim + ((M,), 0, 0, 0), # 0 elems from the left + ((M,), -1, -1, 0), # 0 elems from the right + ((M,), 0, 5, 3), # 3 elems from the left + ((M,), 0, -5, 2), # 2 elems from the right + ((M,), -1, 0, M), # M elems from the left + ((M,), 0, -M, M), # M elems from the right + + # 2-dim + ((M, S), 1, 0, 0), # dim 1, 0 elems from the left + ((S, M), -2, -1, 0), # dim 0, 0 elems from the right + ((L, S), 1, 2, 3), # dim 1, 3 elems from the left + ((L, S), -1, 3, 2), # dim 1, 2 elems from the left + ((M, L), 0, 0, M), # dim 0, M elems from the left + ((M, L), -1, -L, L), # dim 1, L elems from the right + + # 3-dim + ((L, M, S), 2, 0, 0), # dim 2, 0 elems from the left + 
((M, S, L), -1, -1, 0), # dim 2, 0 elems from the right + ((S, L, M), 2, 0, M), # dim 2, M elems from the left + ((L, S, M), -1, -M, M), # dim 2, M elems from the right + ((S, L, M), 1, 0, 0), # dim 1, 0 elems from the left + ((S, L, M), 0, 2, 1), # dim 0, 1 elem from the left + ((M, S, M), -1, -5, 4), # dim 2, 4 elems from the right + ) + + for shape, dim, start, length in shapes_and_args: + tensor = make_tensor(shape, dtype=dtype, device=device, low=None, high=None, + requires_grad=requires_grad) + yield SampleInput(tensor, dim, start, length) + # narrow also accepts the start argument being a Tensor + if is_narrow: + yield SampleInput(tensor, dim, torch.tensor(start), length) + +def error_inputs_narrow_narrow_copy(op_info, device, *, is_narrow, is_ref): + make_arg = partial(make_tensor, device=device, dtype=torch.float32) + + # 0-dim + yield ErrorInput(SampleInput(make_arg(()), 0, 0, 1), + error_type=RuntimeError, + error_regex=r"narrow\(\) cannot be applied to a 0-dim tensor\.") + + # out of bounds dim + if not is_narrow and not is_ref and torch.device(device).type == 'cpu': + # narrow_copy_dense_cpu_out + yield ErrorInput(SampleInput(make_arg((M, S, L)), 3, 0, 0), + error_type=RuntimeError, + error_regex=r"Expected dim < static_cast\(self_sizes.size\(\)\) to be true, but got false\.") + else: + yield ErrorInput(SampleInput(make_arg((M, S, L)), 3, 0, 0), + error_type=IndexError, + error_regex=r"Dimension out of range \(expected to be in range of \[-3, 2\], but got 3\)") + # out of bounds dim (negative) + yield ErrorInput(SampleInput(make_arg((L, S, M)), -4, 0, 0), + error_type=IndexError, + error_regex=r"Dimension out of range \(expected to be in range of \[-3, 2\], but got -4\)") + + # out of bounds start + yield ErrorInput(SampleInput(make_arg((L, M, S)), 1, M + 1, 0), + error_type=IndexError, + error_regex=r"start out of range \(expected to be in range of \[-10, 10\], but got 11\)") + # out of bounds start (negative) + yield 
ErrorInput(SampleInput(make_arg((L, M, S)), 1, -M - 1, 0), + error_type=IndexError, + error_regex=r"start out of range \(expected to be in range of \[-10, 10\], but got -11\)") + + # out of bounds length + yield ErrorInput(SampleInput(make_arg((S, L, M)), 2, 0, M + 1), + error_type=RuntimeError, + error_regex=r"start \(0\) \+ length \(11\) exceeds dimension size \(10\)\.") + # out of bounds length (negative) + if not is_narrow and not is_ref and torch.device(device).type == 'cpu': + # narrow_copy_dense_cpu_out + yield ErrorInput(SampleInput(make_arg((M,)), 0, 0, -1), + error_type=RuntimeError, + error_regex=r"start \(0\) \+ length \(-1\) exceeds dimension size \(10\)\.") + else: + yield ErrorInput(SampleInput(make_arg((M,)), 0, 0, -1), + error_type=RuntimeError, + error_regex=r"narrow\(\): length must be non-negative\.") + + # Test Tensor overload that was added for XLA. Start must be an 0-dim + # integral Tensor. narrow_copy doesn't have this overload. + # https://github.com/pytorch/pytorch/issues/31558 + if is_narrow: + # *1-dim* integral Tensor + yield ErrorInput(SampleInput(make_arg((L, M, S)), 1, make_arg(S, dtype=torch.int), 2), + error_type=RuntimeError, + error_regex=r"start must be an 0-dim integral Tensor\.") + + # 0-dim *bool* Tensor (bools are not allowed) + yield ErrorInput(SampleInput(make_arg((L, M, S)), -3, make_arg((), dtype=torch.bool), 3), + error_type=RuntimeError, + error_regex=r"start must be an 0-dim integral Tensor\.") + + +def sample_trapezoid(op_info, device, dtype, requires_grad, **kwargs): + y_shape_x_shape_and_kwargs = [ + ((2, 3), (2, 3), {}), + ((2, 3), (2, 3), {'dim': 1}), + ((6,), (6,), {}), + ((6,), None, {}), + # When 'trapezoid' is called with an empty input, it does not produce an output with requires_grad + # See Issue #{61619} + # ((6,0), (6,0), {}), + ((2, 3), (1, 3), {}), + ((3, 3), (3, 3), {}), + ((3, 3), (3, 3), {'dim': -2}), + ((5,), None, {'dx': 2.0}), + ((2, 2), None, {'dx': 3.0}) + ] + make_arg = partial(make_tensor, 
dtype=dtype, device=device, low=None, high=None, + requires_grad=requires_grad) + for y_shape, x_shape, kwarg in y_shape_x_shape_and_kwargs: + y_tensor = make_arg(y_shape) + if x_shape is not None: + x_tensor = make_arg(x_shape) + yield SampleInput(y_tensor, x_tensor, **kwarg) + else: + yield SampleInput(y_tensor, **kwarg) + +def sample_cumulative_trapezoid(op_info, device, dtype, requires_grad, **kwargs): + + y_shape_x_shape_and_kwargs = [ + ((2, 3), (2, 3), {}), + ((2, 3), (2, 3), {'dim': 1}), + ((6,), (6,), {}), + ((6,), None, {}), + # When 'cumulative_trapezoid' is called with an empty input, it does not produce an output with requires_grad + # See Issue #{61619} + # ((6,0), (6,0), {}), + ((2, 3), (1, 3), {}), + ((3, 3), (3, 3), {}), + ((3, 3), (3, 3), {'dim': -2}), + ((5,), None, {'dx': 2.0}), + ((2, 2), None, {'dx': 3.0}) + ] + make_arg = partial(make_tensor, device=device, dtype=dtype, + requires_grad=requires_grad, low=None, high=None) + for y_shape, x_shape, kwarg in y_shape_x_shape_and_kwargs: + y_tensor = make_arg(y_shape) + if x_shape is not None: + x_tensor = make_arg(x_shape) + yield SampleInput(y_tensor, x_tensor, **kwarg) + else: + yield SampleInput(y_tensor, **kwarg) + +def sample_unsqueeze(op_info, device, dtype, requires_grad, **kwargs): + shapes_and_axes = [ + ((3, 4, 5), 0), + ((3, 4, 5), 1), + ((3, 4, 5), 3), + ((3, 4, 5), -1), + ((3, 4, 5), -3), + ((), 0), + ((), -1), + ((1,), 0), + ((1,), -1), + ] + + for shape, axis in shapes_and_axes: + tensor = make_tensor(shape, dtype=dtype, device=device, low=None, high=None, + requires_grad=requires_grad) + yield SampleInput(tensor, axis) + + +def sample_inputs_nn_unfold(op_info, device, dtype, requires_grad, **kwargs): + shapes = ((0, 1, 5, 5), (2, 3, 5, 5)) + kernel_sizes = (2, (2, 2), (2, 3)) + dilations = (1, 2, (1, 2)) + paddings = (0, 1, (1, 2)) + strides = (1, 2, (1, 2)) + + cases = product(shapes, kernel_sizes, dilations, paddings, strides) + make_arg = partial(make_tensor, device=device, 
dtype=dtype, requires_grad=requires_grad) + for shape, kernel_size, dilation, padding, stride in cases: + tensor = make_arg(shape) + yield SampleInput(tensor, kernel_size, dilation, padding, stride) + + # With default args + yield SampleInput(make_arg((1, 1, 5, 5)), (3, 3)) + + +def sample_inputs_squeeze(op_info, device, dtype, requires_grad, **kwargs): + shapes_and_args = ( + ((S, 1, S, 1), ()), + ((1, 1, 1, 1), ()), + ((1, 1, 1, 1), (0,)), + ((S, 1, S, 1), (1,)), + ((S, 1, S, 1), (-1,)), + ((S, 1, S, 1), (2,)), + ((S, 1, S, 1), (-2,)), + ((), (0, )), + ) + + for shape, args in shapes_and_args: + tensor = make_tensor(shape, dtype=dtype, device=device, low=None, high=None, + requires_grad=requires_grad) + + yield SampleInput(tensor, args=args) + + +def sample_inputs_squeeze_multiple(op_info, device, dtype, requires_grad, **kwargs): + shapes_and_args = ( + ((1, 1, 1, 1), ()), + ((S, 1, S, 1), (1,)), + ((S, 1, S, 1), (-1,)), + ((S, 1, S, 1), (1, 3)), + ((S, 1, S, 1), (1, 2,)), + ((), (0,)), + ) + + for shape, dims in shapes_and_args: + tensor = make_tensor(shape, dtype=dtype, device=device, low=None, high=None, + requires_grad=requires_grad) + + yield SampleInput(tensor, dims) + + +def _squeeze_ref(x, axis=None): + # NumPy doesn't allow squeezing scalars + if x.ndim == 0: + return x + + if isinstance(axis, Sequence): + # Numpy doesn't allow specifying non-singular dimensions + axis = tuple(a for a in axis if x.shape[a] == 1) + + if isinstance(axis, int) and x.shape[axis] != 1: + return x + + return np.squeeze(x, axis) + +def sample_inputs_nn_pad(op_info, device, dtype, requires_grad, mode, **kwargs): + assert mode in ('constant', 'reflect', 'replicate', 'circular') + if mode in ['reflect', 'replicate']: + cases: tuple = ( # ignore + ((1, 3), (1, 2)), + ((1, 3), (0, 1)), + ((0, 3, 3), (1, 2)), + ((0, 3, 3), (0, 1)), + ((1, 3, 3), (1, 2)), + ((1, 3, 3), (0, 1)), + ((1, 3, 3), (0, 2, 0, 1)), + ((0, 3, 3, 3), (0, 2, 0, 1)), + ((3, 3, 5, 5), (0, 2, 0, 1)), + ((3, 3, 5, 
5), (1, 1, 1, 1, 1, 1)), + ((1, 3, 3, 3, 3), (1, 1, 1, 1, 1, 1)), + ((1, 3, 4, 4), (-1, 1, -2, 1)), + ) + elif mode == 'constant': + cases = ( + ((1, 3), (1, 2)), + ((1, 3), (0, 1)), + ((1, 3), (0, 2, 0, 1)), + ((0, 3, 3), (1, 2)), + ((0, 3, 3), (0, 1)), + ((0, 3, 3), (0, 2, 0, 1)), + ((0, 3, 3), (1, 1, 1, 1, 1, 1)), + ((1, 3, 3), (1, 2)), + ((1, 3, 3), (0, 1)), + ((1, 3, 3), (0, 2, 0, 1)), + ((1, 3, 3), (1, 1, 1, 1, 1, 1)), + ((0, 3, 3, 3), (1, 2)), + ((0, 3, 3, 3), (0, 1)), + ((0, 3, 3, 3), (0, 2, 0, 1)), + ((0, 3, 3, 3), (1, 1, 1, 1, 1, 1)), + ((3, 3, 5, 5), (1, 2)), + ((3, 3, 5, 5), (0, 1)), + ((3, 3, 5, 5), (0, 2, 0, 1)), + ((3, 3, 5, 5), (1, 1, 1, 1, 1, 1)), + ((1, 3, 3, 3, 3), (1, 2)), + ((1, 3, 3, 3, 3), (0, 1)), + ((1, 3, 3, 3, 3), (0, 2, 0, 1)), + ((1, 3, 3, 3, 3), (1, 1, 1, 1, 1, 1)), + ((1, 3, 4, 4), (-1, 1, -2, 1)), + ) + else: # mode == 'circular' + if dtype == torch.bool: + # test_dtypes fails on ASAN with for the case ab + # runtime error: load of value 190, which is not a valid value for type 'bool' + # Reference: https://github.com/pytorch/pytorch/pull/62814#issuecomment-894156562 + # Reference Issue: https://github.com/pytorch/pytorch/issues/63034 + cases = ( + ((2, 3, 3), (1, 2)), + ((1, 3, 3), (1, 2)), + ) + else: + cases = ( + ((0, 3, 3), (1, 2)), + ((0, 3, 3), (0, 1)), + ((1, 3, 3), (1, 2)), + ((1, 3, 3), (0, 1)), + ((0, 3, 3, 3), (0, 2, 0, 1)), + ((3, 3, 5, 5), (0, 2, 0, 1)), + ((1, 3, 3, 3, 3), (1, 1, 1, 1, 1, 1)), + ((1, 3, 4, 4), (-1, 1, -2, 1)), + ) + + make_inp = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + if mode == 'constant': + # Default args + yield SampleInput(make_inp((1, 3, 3)), args=((2, 2),)) + + if mode in ['reflect', 'replicate', 'circular']: + for shape, pad in cases: + yield SampleInput(make_inp(shape), args=(pad, mode)) + else: # mode == 'constant' + for pad_value in (1., 2.): + for shape, pad in cases: + yield SampleInput(make_inp(shape), args=(pad, mode, pad_value)) + +def 
sample_inputs_nn_pad_replicate_negative(op_info, device, dtype, requires_grad, **kwargs): + cases: tuple = ( + ((5, 3, 4, 4), (-4, 5, 0, 0)), + ((6, 2, 4, 4), (0, 0, 2, -4)), + ((5, 6, 4, 4), (5, -4, -4, 3)), + ((4, 2, 5, 5), (-2, -1, 4, 6)), + ((2, 6, 5, 5), (8, -1, -1, -3)), + ((8, 1, 5, 5), (-2, -1, -1, -3)), + ) + make_inp = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + for shape, pad in cases: + yield SampleInput(make_inp(shape), args=(pad, 'replicate')) + +def sample_inputs_constant_pad_nd(op_info, device, dtype, *args, **kwargs): + # Inherit sample inputs from nn.pad, but transform them to fit + # constant_pad_nd's interface + nn_samples = sample_inputs_nn_pad(op_info, device, dtype, *args, + mode='constant', **kwargs) + + # NOTE: primTorch is more strict about the type of the fill value argument + # So we must cast it to the correct dtype + from torch._prims_common import dtype_to_type + scalar_type = dtype_to_type(dtype) + + def drop_mode_argument(input, pad, mode=None, value=None): + if value is None: + return SampleInput(input, args=(pad,)) + else: + return SampleInput(input, args=(pad, scalar_type(value))) + + for sample in nn_samples: + yield drop_mode_argument(sample.input, *sample.args, **sample.kwargs) + +def sample_inputs_repeat_interleave(op_info, device, dtype, requires_grad, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + yield SampleInput(make_input(()), repeats=2) + yield SampleInput(make_input((2, 3, 4)), repeats=2) + yield SampleInput(make_input((2, 3, 4)), repeats=2, dim=1) + yield SampleInput(make_input((2, 3, 4)), repeats=torch.arange(3, device=device), dim=1) + + +def sample_inputs_stft(op_info, device, dtype, requires_grad, **kwargs): + def mt(shape, **kwargs): + return make_tensor(shape, device=device, dtype=dtype, + requires_grad=requires_grad, **kwargs) + + yield SampleInput(mt(100), n_fft=10, return_complex=True) + yield 
SampleInput(mt(100), n_fft=10, return_complex=False) + if dtype.is_complex: + yield SampleInput(mt(100), n_fft=10) + + for center in [False, True]: + yield SampleInput(mt(10), n_fft=7, center=center, return_complex=True) + yield SampleInput(mt((10, 100)), n_fft=16, hop_length=4, + center=center, return_complex=True) + + window = mt(16, low=.5, high=2.0) + yield SampleInput( + mt((2, 100)), kwargs=dict(n_fft=16, window=window, return_complex=True, center=center)) + yield SampleInput( + mt((3, 100)), kwargs=dict(n_fft=16, window=window, return_complex=True, center=center)) + if not dtype.is_complex: + yield SampleInput( + mt((10, 100)), n_fft=16, window=window, onesided=False, + return_complex=True) + + +def sample_inputs_istft(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + def mt(shape, **kwargs): + real_shape = shape if dtype.is_complex else shape + (2,) + return make_arg(real_shape, **kwargs) + + yield SampleInput(mt((10, 2)), kwargs=dict(n_fft=10)) + yield SampleInput(mt((6, 3)), kwargs=dict(n_fft=6, onesided=False)) + yield SampleInput(mt((6, 4)), kwargs=dict(n_fft=10, onesided=True)) + + for center in [False, True]: + yield SampleInput(mt((10, 10, 6)), kwargs=dict(n_fft=10, center=center)) + yield SampleInput(mt((1, 9, 10)), kwargs=dict(n_fft=16, hop_length=4, center=center)) + + window = make_arg(10, low=.5, high=2.0) + yield SampleInput(mt((10, 10, 6)), kwargs=dict( + n_fft=10, window=window, center=center, return_complex=dtype.is_complex)) + yield SampleInput(mt((10, 10, 10)), kwargs=dict( + n_fft=10, window=window[:8], win_length=8, center=center, return_complex=True)) + + real_window = window if not dtype.is_complex else window.real + yield SampleInput(mt((10, 5, 6)), kwargs=dict(n_fft=8, window=real_window[:8], center=center)) + +def sample_inputs_ormqr(op_info, device, dtype, requires_grad, **kwargs): + # create a helper function wrapping `make_tensor` + 
make_input = partial(make_tensor, dtype=dtype, device=device, low=-1, high=1) + + batches = [(), (0, ), (2, ), (2, 1)] + ns = [5, 2, 0] + tf = [True, False] + for batch, (m, n), left, transpose in product(batches, product(ns, ns), tf, tf): + input = make_input((*batch, m, n)) + reflectors, tau = torch.geqrf(input) + reflectors.requires_grad_(requires_grad) + tau.requires_grad_(requires_grad) + other_matrix_shape = (m, n) if left else (n, m) + other = make_input((*batch, *other_matrix_shape), requires_grad=requires_grad) + yield SampleInput(reflectors, tau, other, left=left, transpose=transpose) + + +def sample_inputs_cholesky_solve(op_info, device, dtype, requires_grad=False, **kwargs): + cholesky_inverse_samples = sample_inputs_linalg_cholesky_inverse( + op_info, device, dtype, requires_grad=False + ) + + for sample in cholesky_inverse_samples: + psd_matrix = sample.input + sample.input = make_tensor(psd_matrix.shape, dtype=dtype, device=device, requires_grad=requires_grad, low=None, high=None) + sample.args = (psd_matrix.requires_grad_(requires_grad),) + yield sample + + +def sample_inputs_lu(op_info, device, dtype, requires_grad=False, **kwargs): + make_arg = partial(make_fullrank_matrices_with_distinct_singular_values, + dtype=dtype, device=device, requires_grad=requires_grad) + + # not needed once OpInfo tests support Iterables + batch_shapes = ((), (3,), (3, 3)) + for batch_shape, get_infos, size_delta in product(batch_shapes, (True, False), (-2, -1, 0, +1, +2)): + shape = batch_shape + (S + size_delta, S) + input = make_arg(*shape) + yield SampleInput(input, args=(True, get_infos)) + + +def sample_inputs_lu_unpack(op_info, device, dtype, requires_grad=False, **kwargs): + def out_fn(output): + return output[1], output[2] + + for lu_sample in sample_inputs_linalg_lu(op_info, device, dtype, requires_grad, **kwargs): + lu_data, pivots = torch.linalg.lu_factor(lu_sample.input) + lu_data.requires_grad_(requires_grad) + yield SampleInput(lu_data, 
pivots).with_metadata(output_process_fn_grad=out_fn) + + +def sample_inputs_roll(op_info, device, dtype, requires_grad=False, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + args = ((0, 0), (1, 2), (0, 2), (2, 0), (-1, 0), (10000, 1), (2,), ((1, 2, -1), (0, 1, 2))) + + for arg in args: + yield SampleInput(make_arg((0, 0, 0)), args=arg) + yield SampleInput(make_arg((S, S, S)), args=arg) + + # Scalar tensor + yield SampleInput(make_arg(()), args=(10, )) + +def error_inputs_roll(op_info, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float32) + err_msg1 = "`shifts` required" + s1 = SampleInput(make_arg((S,)), ()) + yield ErrorInput(s1, error_regex=err_msg1) + + err_msg2 = ("shifts and dimensions must align") + s2 = SampleInput(make_arg((S, S)), (2, 1), 0) + yield ErrorInput(s2, error_regex=err_msg2) + + err_msg3 = ("out of range") + s3 = SampleInput(make_arg((S, )), 0, 2) + yield ErrorInput(s3, error_regex=err_msg3, error_type=IndexError) + + err_msg4 = ("Dimension specified as 0") + s4 = SampleInput(make_arg(()), 0, 0) + yield ErrorInput(s4, error_regex=err_msg4, error_type=IndexError) + +def sample_inputs_rot90(op_info, device, dtype, requires_grad=False, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + args = itertools.product(range(-5, 6), [(0, 1), (1, 2), (1, -1)]) + + yield SampleInput(make_arg((S, S, S))) + for arg in args: + yield SampleInput(make_arg((S, S, S)), args=arg) + + +def error_inputs_rot90(op_info, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float32) + err_msg1 = "expected total rotation dims" + s1 = SampleInput(make_arg((S, S)), dims=(0,)) + yield ErrorInput(s1, error_regex=err_msg1) + + err_msg2 = "expected total dims >= 2" + s2 = SampleInput(make_arg((S,))) + yield ErrorInput(s2, error_regex=err_msg2) + + err_msg3 = "expected rotation dims to be different" + s3 = 
SampleInput(make_arg((S, S)), dims=(1, 1)) + yield ErrorInput(s3, error_regex=err_msg3) + + +def sample_inputs_std_var(op_info, device, dtype, requires_grad, **kwargs): + tensor_nd = partial(make_tensor, (S, S, S), device=device, dtype=dtype, + requires_grad=requires_grad) + tensor_1d = partial(make_tensor, (S,), device=device, dtype=dtype, + requires_grad=requires_grad) + + yield SampleInput(tensor_nd()) + yield SampleInput(tensor_nd(), dim=1) + yield SampleInput(tensor_nd(), dim=1, unbiased=True, keepdim=True) + yield SampleInput(tensor_1d(), dim=0, unbiased=True, keepdim=True) + yield SampleInput(tensor_1d(), dim=0, unbiased=False, keepdim=False) + + yield SampleInput(tensor_nd(), dim=(1,), correction=1.3) + yield SampleInput(tensor_nd(), dim=(1,), correction=S // 2) + yield SampleInput(tensor_nd(), dim=None, correction=0, keepdim=True) + yield SampleInput(tensor_nd(), dim=None, correction=None) + yield SampleInput(tensor_nd(), correction=0, keepdim=True) + yield SampleInput(make_tensor(3, 4, 5, device=device, dtype=dtype, requires_grad=requires_grad), dim=-3) + + +def sample_inputs_std_var_unbiased(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, + requires_grad=requires_grad) + + # Test var_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor) + yield SampleInput(make_arg((S, S)), True) + yield SampleInput(make_arg((S,)), False) + + +def _generate_correlation_inputs(device, dtype, requires_grad, **kwargs): + shapes = [(2,), (1, 2), (3, 2), (2, 3)] + for shape in shapes: + yield make_tensor(shape, dtype=dtype, device=device, requires_grad=requires_grad) + + +def sample_inputs_corrcoef(op_info, device, dtype, requires_grad, **kwargs): + return (SampleInput(t) for t in _generate_correlation_inputs(device, dtype, requires_grad)) + + +def sample_inputs_cov(op_info, device, dtype, requires_grad, **kwargs): + for t in _generate_correlation_inputs(device, dtype, requires_grad): + yield SampleInput(t) + 
num_observations = t.numel() if t.ndimension() < 2 else t.size(1) + fweights = make_tensor((num_observations,), dtype=torch.int, device=device, low=1, high=10) + aweights = make_tensor((num_observations,), dtype=torch.float, device=device, low=0, high=1, requires_grad=requires_grad) + for correction, fw, aw in product(range(num_observations), [None, fweights], [None, aweights]): + yield SampleInput(t.clone().requires_grad_(requires_grad), + correction=correction, fweights=fw, aweights=aw) + + +def error_inputs_cov(op_info, device, **kwargs): + a = torch.rand(S, device=device) + yield ErrorInput( + SampleInput(torch.rand(S, S, S, device=device)), + error_regex="expected input to have two or fewer dimensions") + yield ErrorInput( + SampleInput(a, fweights=torch.rand(S, S, device=device)), + error_regex="expected fweights to have one or fewer dimensions") + yield ErrorInput( + SampleInput(a, aweights=torch.rand(S, S, device=device)), + error_regex="expected aweights to have one or fewer dimensions") + yield ErrorInput( + SampleInput(a, fweights=torch.rand(S, device=device)), + error_regex="expected fweights to have integral dtype") + yield ErrorInput( + SampleInput(a, aweights=torch.tensor([1, 1], device=device)), + error_regex="expected aweights to have floating point dtype") + yield ErrorInput( + SampleInput(a, fweights=torch.tensor([1], device=device)), + error_regex="expected fweights to have the same numel") + yield ErrorInput( + SampleInput(a, aweights=torch.rand(1, device=device)), + error_regex="expected aweights to have the same numel") + yield ErrorInput( + SampleInput(a, fweights=torch.tensor([-1, -2, -3, -4 , -5], device=device)), + error_regex="fweights cannot be negative") + yield ErrorInput( + SampleInput(a, aweights=torch.tensor([-1., -2., -3., -4., -5.], device=device)), + error_regex="aweights cannot be negative") + + +def sample_inputs_permute(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, 
dtype=dtype, requires_grad=requires_grad) + + cases = [((1, 2, 3, 4), (0, 2, 3, 1)), + ((1, 2, 3, 4), (0, -2, -1, 1)), + ((), ()), + ((1, 2, 3, 4), (2, 1, 3, 0))] + + for shape, args in cases: + yield SampleInput(make_arg(shape), args=(args,)) + +def reference_inputs_permute(op, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_permute(op, device, dtype, requires_grad, **kwargs) + + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + cases = ( + ((), ()), + ((1,), (0,)), + ((2, 2), (1, 0)), + ((2, 2), (0, 1)), + ((2, 0, 1), (0, 2, 1)), + ((3, 4, 2), (2, 1, 0)), + ((3, 4, 2), (1, 0, 2)), + ((3, 4, 2), (0, 1, 2)), + ) + + # Adds tricky permutations and permutations with noncontiguity + for shape, permutation in cases: + for p in itertools.permutations(permutation): + a = make_arg(shape).permute(p) + yield SampleInput(a, args=(permutation,)) + + a = make_arg(shape, noncontiguous=True).permute(p) + yield SampleInput(a, args=(permutation,)) + +def error_inputs_softshrink(op, device, **kwargs): + yield ErrorInput(SampleInput(make_tensor((1,), dtype=torch.float, device=device), kwargs={"lambd": -0.5}), + error_regex="lambda must be greater or equal to 0, but found to be -0.5") + +def sample_inputs_softshrink(op_info, device, dtype, requires_grad=False, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # The additional sample is to check additional values of lambd beyond the default + # value (what is already checked by sample_inputs_elementwise_unary) + for lbda in (0., 0.5): + yield SampleInput(make_arg(S, S), kwargs={"lambd": lbda}) + + yield from sample_inputs_elementwise_unary(op_info, device, dtype, requires_grad) + +def sample_inputs_hardshrink(op_info, device, dtype, requires_grad=False, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # The additional sample is to check additional values of 
lambd beyond the default + # value (what is already checked by sample_inputs_elementwise_unary) + # Note that unlike softshrink, lambd is allowed to be negative for hardshrink + for lbda in (-0.5, 0., 0.5): + yield SampleInput(make_arg(S, S), kwargs={"lambd": lbda}) + + yield from sample_inputs_elementwise_unary(op_info, device, dtype, requires_grad) + + +def sample_inputs_hardtanh(op_info, device, dtype, requires_grad=False, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # The additional sample is to check additional values of min_val and max_val beyond the default + # value (what is already checked by sample_inputs_elementwise_unary) + for max_val, min_val in ((0.5, -0.5), (0., 0.)): + yield SampleInput(make_arg(S, S), kwargs={"min_val": min_val, "max_val": max_val}) + + yield from sample_inputs_elementwise_unary(op_info, device, dtype, requires_grad) + +def error_inputs_hardtanh(op_info, device, **kwargs): + # Tests that hardtanh errors out when passed min_val > max_val. 
+ yield ErrorInput(SampleInput(make_tensor((1,), dtype=torch.float, device=device), kwargs={"min_val": 0.5, "max_val": -0.5}), + error_type=ValueError, error_regex="min_val cannot be greater than max_val") + +def sample_inputs_einsum(op_info, device, dtype, requires_grad=False, **kwargs): + def c(t): + return t.clone().requires_grad_(requires_grad) + + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + x = make_arg((3,)) + y = make_arg((4,)) + A = make_arg((2, 3,)) + B = make_arg((1, 3,)) + C = make_arg((1, 2, 3,)) + D = make_arg((1, 3, 4,)) + E = make_arg((4, 4,)) + H = make_arg((3, 3,)) + I = make_arg((1, 3, 1,)) + + # Vector operations + yield SampleInput([c(x)], 'i->') # sum + yield SampleInput([c(x), c(y)], 'i,j->ij') # outer + + # Matrix operations + yield SampleInput([c(A)], "ij->i") # col sum + yield SampleInput([c(A), c(B)], "ij,kj->ik") # matmul + yield SampleInput([c(A), c(E)], "ij,Ab->ijAb") # matrix outer product + + # Tensor operations + yield SampleInput([c(C), c(D)], "aij,ajk->aik") # batch matmul + yield SampleInput([c(D), c(E)], "aij,jk->aik") # tensor matrix contraction + yield SampleInput([c(C), c(B)], "ijk,ik->j") # non contiguous + + # Test diagonals + yield SampleInput([c(I)], 'iji->j') # non-contiguous trace + + # Test ellipsis + yield SampleInput([c(H)], "i...->...") + yield SampleInput([c(C), c(x)], '...ik, ...j -> ij') + + +def sample_inputs_flip(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + sizes = ((S, M, S), (S, 0, M)) + all_dims = ((0, 1, 2), (0,), (0, 2), (-1,), ()) + + for size, dims in product(sizes, all_dims): + yield SampleInput(make_arg(size), kwargs={"dims": dims}) + +def sample_inputs_fliplr_flipud(op_info, device, dtype, requires_grad, **kwargs): + shapes = [ + (S, M, S), + (S, 0, M), + ] + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + return 
(SampleInput(make_arg(shape, low=None, high=None)) for shape in shapes) + +def error_inputs_fliplr(op, device, **kwargs): + yield ErrorInput(SampleInput(make_tensor((1,), dtype=torch.float, device=device)), + error_regex="Input must be >= 2-d.") + +def error_inputs_flipud(op, device, **kwargs): + yield ErrorInput(SampleInput(make_tensor((), dtype=torch.float, device=device)), + error_regex="Input must be >= 1-d.") + +def sample_inputs_clamp(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad) + make_integral_arg = partial(make_tensor, dtype=torch.int32, device=device, low=None, high=None, requires_grad=False) + shape = (S, M, S) + + yield SampleInput(make_arg(shape), args=(make_arg(shape), make_arg(shape))) + yield SampleInput(make_arg(shape), args=(make_arg(shape[1:]), make_arg(shape[1:]))) + yield SampleInput(make_arg(shape), args=(make_arg((S, 1, S)),)) + yield SampleInput(make_arg(shape), args=(None, make_arg(shape))) + yield SampleInput(make_arg(shape), args=(make_arg(shape), None)) + # test type promotion + yield SampleInput(make_arg(shape), args=(make_integral_arg(shape), None)) + yield SampleInput(make_arg(shape), args=(make_arg(shape), make_integral_arg(shape))) + +def reference_inputs_elementwise_ternary(op, device, dtype, requires_grad, *, sample_inputs_func, supports_scalars=False, **kwargs): + yield from sample_inputs_func(op, device, dtype, requires_grad, **kwargs) + + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + make_scalar_tensor = partial(make_tensor, (), device='cpu', dtype=dtype, requires_grad=requires_grad) + supported_dtypes = op.supported_dtypes(device) + + # broadcasting and oncontiguous cases + cases = ( + ((4, 4), (4, 4), (4, 4)), + ((4, 4), (1, 4, 4), (4, 4)), + ((4, 4), (1, 4, 4), (4, 1, 4)), + ((4, 4, 1), (1, 4, 4), (4, 4)), + ((4, 1), (1, 4, 4), (1, 4)), + ((4, 4), (), (4, 4)), + ((4, 
4), (), ()), + ((), (4, 4), (1, 4, 4)), + ) + + for a, b, c in cases: + yield SampleInput(make_arg(a), args=(make_arg(b), make_arg(c))) + yield SampleInput(make_arg(a, noncontiguous=True), + args=(make_arg(b).transpose(0, -1), make_arg(c, noncontiguous=True).transpose(0, -1))) + + # scalar cases + if supports_scalars: + cases = [ + ((), 1, 2,), + ((), 1., 2), + ((4, 4), 1., 2,), + ((3, 4), make_scalar_tensor(), make_scalar_tensor()), + ] + + if torch.complex64 in supported_dtypes: + cases.extend([ + ((3, 1, 4), complex(1, 2), 3.), + ]) + + for a, b, c in cases: + yield SampleInput(make_arg(a), args=(b, c)) + + # type promotion cases + # int x float + if torch.float in supported_dtypes and torch.long in supported_dtypes: + a = make_arg((), dtype=torch.long) + b = make_arg((1, 4), dtype=torch.float) + c = make_arg((3, 4)) + + cases = ( + (a, b, c), + (c, a, b), + ) + + for a, b, c in cases: + yield SampleInput(a, args=(b, c)) + + # NaN propagation + if dtype.is_floating_point or dtype.is_complex: + nan = float('nan') if dtype.is_floating_point else complex(float('nan'), float('nan')) + + a = make_arg((12,)) + a[4] = nan + a[7] = nan + b = make_arg((12,)) + b[1] = nan + b[7] = nan + c = make_arg((12,)) + c[9] = nan + + yield SampleInput(a, args=(b, c)) + + +def _clamp_min_numpy(a, min=None): + return np.maximum(a, min) + + +def _clamp_max_numpy(a, max=None): + return np.minimum(a, max) + + +def _clamp_numpy(a, min=None, max=None): + if min is None: + return np.minimum(a, max) + if max is None: + return np.maximum(a, min) + + return np.minimum(max, np.maximum(a, min)) + + +def sample_inputs_cumprod(op_info, device, dtype, requires_grad, **kwargs): + def make_arg(shape): + # shrink values to be in the interval [-1, +1] for better precision in gradgradcheck + return make_tensor(shape, dtype=dtype, device=device, low=-1, high=+1, requires_grad=requires_grad) + + def prod_zeros(dim_select): + assert len(dim_select) == 2 + result = make_arg(3 * (S,)) + 
result.narrow(dim_select[0], 0, 1).narrow(dim_select[1], 1, 1).zero_() + result.narrow(dim_select[0], 2, 1).narrow(dim_select[1], 3, 1).zero_() + result.narrow(dim_select[0], 4, 1).narrow(dim_select[1], 3, 1).zero_() + return result + + for dim in range(3): + yield SampleInput(make_arg((S, S, S)), args=(dim,)) + # Scalar tensors and empty tensor + for size in [(), (1,), (0,)]: + yield SampleInput(make_arg(size), args=(0,)) + + yield SampleInput(prod_zeros([0, 1]), args=(1,)) + yield SampleInput(prod_zeros([0, 2]), args=(1,)) + yield SampleInput(prod_zeros([1, 2]), args=(1,)) + + # test dtype kwarg + yield SampleInput(prod_zeros([1, 2]), args=(1,), kwargs={'dtype': dtype}) + +def sample_inputs_view_as_complex(op_info, device, dtype, requires_grad, **kwargs): + yield SampleInput(make_tensor((S, 2), dtype=dtype, device=device, requires_grad=requires_grad)) + +def sample_inputs_view_as_real(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + sizes = ((S, S), ()) + return (SampleInput(make_arg(size)) for size in sizes) + +def error_inputs_complex(op_info, device, is_ref=False, **kwargs): + make_arg = partial(make_tensor, dtype=torch.float32, device=device) + + if is_ref: + error_float = "Expected both inputs to be Half, Float or Double tensors but got torch.float32 and torch.int32" + error_dtype = "Expected object of scalar type torch.float32 but got scalar type torch.float64 for second argument" + error_out = "Expected out tensor to have dtype torch.complex128 but got torch.complex64 instead" + else: + error_float = "Expected both inputs to be Half, Float or Double tensors but got Float and Int" + error_dtype = "Expected object of scalar type Float but got scalar type Double for second argument" + error_out = "Expected object of scalar type ComplexDouble but got scalar type ComplexFloat for argument 'out'" + + yield ErrorInput(SampleInput(make_arg(M, S), make_arg(M, S, 
dtype=torch.int)), + error_type=RuntimeError, error_regex=error_float) + + yield ErrorInput(SampleInput(make_arg(M, S), make_arg(M, S, dtype=torch.float64)), + error_type=RuntimeError, error_regex=error_dtype) + + yield ErrorInput(SampleInput(make_arg(M, S, dtype=torch.float64), make_arg(M, S, dtype=torch.float64), + out=make_arg(M, S, dtype=torch.complex64)), + error_type=RuntimeError, error_regex=error_out) + +def sample_inputs_logaddexp(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + shape = (S, S) + yield SampleInput(make_arg(shape), make_arg(shape)) + +def sample_inputs_prod(op_info, device, dtype, requires_grad, **kwargs): + def make_arg(shape): + # shrink values to be in the interval [-1, +1] for better precision in gradgradcheck + return make_tensor(shape, dtype=dtype, device=device, low=-1, high=+1, requires_grad=requires_grad) + + def prod_single_zero(): + result = make_arg(2 * (S,)) + result[0, 1] = 0 + return result + + for sample in sample_inputs_cumprod(op_info, device, dtype, requires_grad): + # only Tensor, ignore other inputs + yield SampleInput(sample.input.clone().requires_grad_(requires_grad)) + yield sample + + # Generates samples with keepdim = True + for sample in sample_inputs_cumprod(op_info, device, dtype, requires_grad): + sample.kwargs['keepdim'] = True + yield sample + + yield SampleInput(prod_single_zero()) + yield SampleInput(make_arg((3, 3, 3)), args=(1,)) + yield SampleInput(make_arg((3, 3, 3)), args=(1,), kwargs={'keepdim': True}) + + yield SampleInput(make_arg((3, 0)), args=(1,)) + yield SampleInput(make_arg((3, 0)), args=(1,), kwargs={'keepdim': True}) + yield SampleInput(torch.tensor([2., 3, 0, 0], dtype=dtype, device=device, requires_grad=requires_grad)) + + # test zero scalar tensor + zero = make_arg(()) + zero.zero_() + yield SampleInput(zero.clone().requires_grad_(requires_grad)) + yield 
SampleInput(zero.clone().requires_grad_(requires_grad), args=(0,)) + yield SampleInput(zero.clone().requires_grad_(requires_grad), + args=(0,), + kwargs={'keepdim': True}) + +def error_inputs_neg(op_info, device, **kwargs): + si = SampleInput(torch.tensor((False, True), device=device)) + msg = ("Negation, the `\\-` operator, on a bool tensor is not supported." + " If you are trying to invert a mask, use the `\\~` or" + " `logical_not\\(\\)` operator instead.") + yield ErrorInput(si, error_regex=msg) + +def sample_inputs_diag(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, low=None, high=None) + yield SampleInput(make_arg(M)) + + tensors = ( + make_arg((M, M)), + make_arg((3, 5)), + make_arg((5, 3)), + ) + + args = ((), (2,), (-2,), (1,), (2,)) + + for tensor, arg in product(tensors, args): + yield SampleInput(tensor.clone().requires_grad_(requires_grad), *arg) + +def reference_inputs_diagonal_diag_embed(op_info, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_diagonal_diag_embed( + op_info, device, dtype, requires_grad, **kwargs) + + make_arg = partial( + make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + shapes1d = ((0,), (1,)) + shapes2d = ((L, M),) + shapes3d = ((L, M, S),) + + kwargs1d = {} + + kwargs2d = ( + # dim1 > dim2 is allowed + dict(dim1=1, dim2=0), + # negative dims are allowed + dict(dim1=-2, dim2=-1), + # one dim negative and the other nonnegative is allowed + dict(dim1=-1, dim2=0), + # out of bounds offset should return an empty tensor in diagonal and + # offset the diagonal in diag_embed + dict(offset=100), + ) + + kwargs3d = kwargs2d + ( + # make sure we can use non-sequential dims + dict(offset=-1, dim1=0, dim2=2), + ) + + samples1d = product(shapes1d, kwargs1d) + samples2d = product(shapes2d, kwargs2d) + samples3d = product(shapes3d, kwargs3d) + + for shape, kwargs in chain(samples1d, samples2d, samples3d): + if 
'diagonal' in op_info.name: + # these are error inputs for diagonal + if shape in ((0,), (1,)): + continue + yield SampleInput(input=make_arg(shape), kwargs=kwargs) + + +def sample_inputs_diagonal_scatter(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + # Shapes for 2D Tensors + shapes_2d = ((M, M), (3, 5), (5, 3)) + + # Shapes for 3D Tensors + shapes_3d = ((M, M, M),) + + args_2d = ((), (2,), (-2,), (1,)) + args_3d = ((1, 1, 2), (2, 0, 1), (-2, 0, 1)) + + for input_shape, arg in chain(product(shapes_2d, args_2d), product(shapes_3d, args_3d)): + input_ = make_arg(input_shape) + # We can programmatically figure out the right shape for src: + # It should be the same size as input.diagonal(other_args...) + if not isinstance(arg, tuple): + arg_tuple = (arg,) + else: + arg_tuple = arg + src_shape = input_.diagonal(*arg_tuple).size() + src = make_arg(src_shape) + yield SampleInput(input_, args=(src, *arg_tuple)) + + +def sample_inputs_to_sparse(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + yield SampleInput(make_arg((S, S))).with_metadata(output_process_fn_grad=lambda x: x.to_dense()) + yield SampleInput(make_arg((S, S)), 1).with_metadata(output_process_fn_grad=lambda x: x.to_dense()) + +def sample_inputs_cross_entropy(op_info, device, dtype, requires_grad, **kwargs): + batch_size, num_classes = shape = (2, 3) + reductions = ("mean", "sum", "none") + + input_shape_and_kwargs: List[Tuple[Tuple[int, ...], Dict[str, Any]]] = [ + (shape, {}), + ((*shape, 1), {}), + ((*shape, 1, 2), {}), + ((*shape, 1, 2, 3), {}), + *[(shape, dict(reduction=reduction)) for reduction in reductions], + *[ + ( + shape, + dict( + weight=make_tensor((num_classes,), device=device, dtype=dtype), + reduction=reduction, + ), + ) + for reduction in reductions + ], + (shape, dict(ignore_index=1)), + ] + + for 
(input_shape, kwargs), probabilities_target in itertools.product(input_shape_and_kwargs, (False, True)): + input = make_tensor(input_shape, device=device, dtype=dtype, requires_grad=requires_grad) + + if probabilities_target: + # ignore_index is not supported for probabilities target + if "ignore_index" in kwargs: + continue + + target = make_tensor( + input_shape, + low=0, + high=1, + device=device, + dtype=dtype, + requires_grad=requires_grad, + ) + else: + target = make_tensor( + (batch_size, *input_shape[2:]), + low=0, + high=num_classes, + device=device, + dtype=torch.long, + ) + + if "ignore_index" in kwargs and torch.all(target == kwargs["ignore_index"]): + # make sure at least one item in target is not ignored + target[0] = random.sample(sorted(set(range(num_classes)) - {kwargs["ignore_index"]}), 1)[0] + + yield SampleInput(input, target, **kwargs) + + +def sample_inputs_logit(op_info, device, dtype, requires_grad, **kwargs): + low, high = op_info.domain + + # Note: Operator is very sensitive at points near the + # start and end of domain and leads to NaN for float16 + # if domain_eps is 1e-5. + if dtype.is_floating_point or dtype.is_complex: + domain_eps = op_info._domain_eps if dtype != torch.float16 else 3e-2 + + low = low + domain_eps + high = high - domain_eps + + make_arg = partial(make_tensor, dtype=dtype, device=device, low=low, high=high, requires_grad=requires_grad) + + yield SampleInput(make_arg((S, S, S))) + yield SampleInput(make_arg((S, S, S)), 0.2) + yield SampleInput(make_arg(())) + yield SampleInput(make_arg(()), 0.2) + +def sample_inputs_isin(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + # isin has two paths based on the size of elements and test_elements. 
+ # if elements.numel() < 10 * pow(test_elements.numel(), 0.145): + yield SampleInput(make_arg((L,)), args=(make_arg((S,)),)) + # else: + yield SampleInput(make_arg((S,)), args=(make_arg((L,)),)) + +def sample_inputs_masked_scatter(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + yield SampleInput(make_arg((S, S)), args=(torch.randn(S, S, device=device) > 0, make_arg((S, S)))) + yield SampleInput(make_arg((S, S)), args=(torch.randn((S,), device=device) > 0, make_arg((S, S)))) + yield SampleInput(make_arg((S, S)), args=(bernoulli_scalar().to(device), make_arg((S, S)))) + yield SampleInput(make_arg((S,)), + args=(torch.randn(S, S, device=device) > 0, make_arg((S, S))), + broadcasts_input=True) + +def error_inputs_masked_scatter(op_info, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float) + for mask_dtype in [torch.float, torch.uint8]: + yield ErrorInput(SampleInput(make_arg(1, 3), args=(torch.ones(1, 3, device=device, dtype=mask_dtype), + make_arg(3, 4))), + error_regex=r"masked_scatter_ only supports boolean masks") + +def sample_inputs_masked_fill(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + yield SampleInput(make_arg((S, S)), args=(torch.randn(S, S, device=device) > 0, 10)) + yield SampleInput(make_arg((S, S)), args=(torch.randn(S, S, device=device) > 0, make_arg(()))) + yield SampleInput(make_arg((S, S)), args=(torch.randn(S, device=device) > 0, 10)) + yield SampleInput(make_arg(()), args=(torch.randn((), device=device) > 0, 10)) + yield SampleInput(make_arg(()), args=(torch.randn((), device=device) > 0, make_arg(()))) + yield SampleInput(make_arg((S, S)), args=(torch.randn((), device=device) > 0, 10)) + + yield SampleInput(make_arg((S,)), + args=(torch.randn(S, S, device=device) > 0, make_arg(())), + broadcasts_input=True) + yield 
SampleInput(make_arg((S,)), + args=(torch.randn(S, S, device=device) > 0, 10), + broadcasts_input=True) + + if torch.device(device).type == 'cuda': + # `self` and `mask` on CUDA but `value` is a CPU scalar tensor. + yield SampleInput(make_arg((S, S)), + args=(torch.randn(S, S, device=device) > 0, + make_tensor((), device="cpu", dtype=dtype))) + +def error_inputs_masked_fill(op_info, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float, requires_grad=False) + # `value` is not a 0-D tensor. + yield ErrorInput(SampleInput(make_arg((2, 2)), args=(make_arg(()) > 0, make_arg((1,)))), + error_regex="only supports a 0-dimensional value tensor, but got tensor with 1 dimension") + # downcasting complex value (scalar overload) + yield ErrorInput(SampleInput(make_arg((2, 2)), args=(make_arg(()) > 0, 1j)), + error_regex=r"value cannot be converted to type .* without overflow") + # downcasting complex value (tensor overload) + yield ErrorInput(SampleInput(torch.ones(2, dtype=torch.long, device=device), + args=(make_arg(()) > 0, torch.tensor(1j, device=device))), + error_regex=r"value cannot be converted to type .* without overflow") + + if torch.device(device).type == 'cuda': + # `self` and `mask` on CPU but `value` is a CUDA scalar tensor. 
+ yield ErrorInput(SampleInput(torch.randn((S, S), device='cpu'), + args=(torch.randn(S, S, device='cpu') > 0, + torch.randn((), device='cuda'))), + error_regex=r"to be on same device") + + +def sample_inputs_masked_select(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial( + make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, low=None, high=None) + + yield SampleInput(make_arg((M, M)), torch.randn(M, M, device=device) > 0) + + yield SampleInput(make_arg((M, M)), torch.randn((M,), device=device) > 0) + yield SampleInput(make_arg((M,)), torch.randn((M, M), device=device) > 0) + + yield SampleInput(make_arg((M, 1, M)), torch.randn((M, M), device=device) > 0) + + yield SampleInput(make_arg(()), torch.tensor(1, device=device, dtype=torch.bool)) + + yield SampleInput(make_arg((M, M)), torch.tensor(1, device=device, dtype=torch.bool)) + + yield SampleInput(make_arg(()), torch.randn((M, M), device=device) > 0) + +def sample_inputs_matrix_exp(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + yield SampleInput(make_arg((S, S))) + yield SampleInput(make_arg((S, S, S))) + +def sample_inputs_matmul(op_info, device, dtype, requires_grad, is_rmatmul=False, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, low=None, + high=None, requires_grad=requires_grad) + test_cases = (((L,), (L,)), + ((S, M), (M,)), + ((M,), (M, S)), + ((S, M), (M, S)), + ((S, 0), (0, M)), + ((S, S, M), (M,)), + ((S, S, M), (M, S)), + ((S, S, 0), (0, S)), + ((M,), (S, M, S)), + ((S, M), (S, M, S)), + ((0, 0), (S, 0, 0)), + ((S, S, M, M), (S, S, M, S)), + ((S, S, M, M), (M,)), + ((M,), (S, S, M, S)), + ((S, S, S), (1, S, S)) + ) + for lhs_shape, rhs_shape in test_cases: + lhs = make_arg(lhs_shape) + rhs = make_arg(rhs_shape) + if not is_rmatmul: + yield SampleInput(lhs, rhs) + else: + yield SampleInput(rhs, lhs) + + +def sample_inputs_meshgrid(op_info: 
OpInfo, device: torch.device, dtype: torch.dtype, + requires_grad: bool, + *, variant: str, **kwargs) -> List[SampleInput]: + if variant == 'variadic': + def make_inputs( + tensors: List[torch.Tensor]) -> Tuple[Union[torch.Tensor, + List[torch.Tensor]], + Tuple[torch.Tensor, ...]]: + return tensors + elif variant == 'list': + def make_inputs( + tensors: List[torch.Tensor]) -> Tuple[Union[torch.Tensor, + List[torch.Tensor]], + Tuple[torch.Tensor, ...]]: + return [tensors] + else: + raise ValueError( + 'Unsupported variant, must be one of {"variadic", "list"}. ' + f'Got "{variant}".') + + SCALAR = torch.Size([]) + VECTOR = torch.Size([3]) + test_cases: List[List[torch.Size]] = [ + [SCALAR], + [VECTOR], + [VECTOR, SCALAR], + [VECTOR, SCALAR, VECTOR], + [VECTOR, SCALAR, VECTOR, SCALAR], + ] + + for shapes, indexing in itertools.product(test_cases, {'xy', 'ij'}): + args = make_inputs( + [make_tensor(shape, dtype=dtype, device=device, requires_grad=requires_grad) + for shape in shapes]) + yield SampleInput(*args, indexing=indexing) + + +def sample_inputs_mvlgamma(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + tensor_shapes = ((S, S), ()) + ns = (1, 2, 3, 4, 5) + + # Since the accepted lower bound for input + # to mvlgamma depends on `p` argument, + # the following function computes the lower bound + # which we pass to `make_tensor`. + def compute_min_val(p): + return (p - 1.) / 2 + + for shape, n in product(tensor_shapes, ns): + min_val = compute_min_val(n) + if not dtype.is_floating_point: + # Round-up minimum value for integral dtypes + min_val += 1 + else: + min_val += 2 * torch.finfo(dtype).eps + yield SampleInput(make_arg(shape, low=min_val), args=(n,)) + + +# Since `mvlgamma` has multiple entries, +# there are multiple common skips for the additional +# entries. Following function is a helper to that end. 
def skips_mvlgamma(skip_redundant=False):
    """Build the DecorateInfo skip list shared by all `mvlgamma` OpInfo entries.

    With ``skip_redundant=True`` the gradient/JIT/common suites are skipped as
    well, since they are already exercised by the primary (p=1) entry.
    """
    # outside domain values are hard error for mvlgamma op.
    common = [
        DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_float_domains'),
        DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs',
                     'test_reference_numerics_extremal'),
        DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
                     'test_reference_numerics_large',
                     dtypes=(torch.float16, torch.int8)),
        DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
                     'test_reference_numerics_small',
                     dtypes=(torch.int8,)),
    ]
    if skip_redundant:
        # Redundant tests
        common.extend([
            DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients'),
            DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients'),
            DecorateInfo(unittest.skip("Skipped!"), 'TestJit'),
            DecorateInfo(unittest.skip("Skipped!"), 'TestCommon'),
        ])
    return tuple(common)


# To test reference numerics against multiple values of argument `p`,
# we make multiple OpInfo entries with each entry corresponding to different value of p.
# We run the op tests from test_ops.py only for `p=1` to avoid redundancy in testing.
+def make_mvlgamma_opinfo(variant_test_name, domain, skips, sample_kwargs): + return UnaryUfuncInfo('mvlgamma', + ref=reference_mvlgamma if TEST_SCIPY else None, + aliases=('special.multigammaln',), + variant_test_name=variant_test_name, + domain=domain, + decorators=(precisionOverride({torch.float16: 5e-2}),), + dtypes=all_types_and(torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_mvlgamma, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + skips=skips, + sample_kwargs=sample_kwargs) + + +def sample_inputs_cumulative_ops(op_info, device, dtype, requires_grad, supports_dtype_kwargs=True, **kwargs): + def _make_tensor_helper(shape, low=None, high=None): + return make_tensor(shape, dtype=dtype, device=device, low=low, high=high, requires_grad=requires_grad) + + yield SampleInput(_make_tensor_helper((S, S, S)), 0) + yield SampleInput(_make_tensor_helper((S, S, S)), 1) + yield SampleInput(_make_tensor_helper(()), 0) + + if supports_dtype_kwargs: + # NOTE: if `dtype` is not same as input, then inplace variants fail with + # `provided dtype must match the dtype of self tensor in cumsum` + yield SampleInput(_make_tensor_helper((S, S, S)), 1, dtype=dtype) + + +def sample_inputs_unfold(op_info, device, dtype, requires_grad, **kwargs): + test_cases = ( + ((), (0, 1, 1)), + ((S, S, S, S), (0, 3, 1)), + ((S, S, S, S), (1, 3, 1)), + ((S, S, S, S), (2, 3, 1)), + ((S, S, S, S), (3, 3, 1)), + ((S, S, S, S), (0, 3, 2)), + ((S, S, S, S), (1, 3, 2)), + ((S, S, S, S), (2, 3, 2)), + ((S, S, S, S), (3, 3, 2)), + ((S, S, S, S), (0, 4, 1)), + ((S, S, S, S), (1, 4, 1)), + ((S, S, S, S), (2, 4, 1)), + ((S, S, S, S), (3, 4, 1)), + ((M,), (0, 3, 1)), + ((M,), (0, 3, 2)), + ((M,), (0, 3, 3)), + ((1000,), (0, 3, 11)), + ((1000,), (0, 2, 27)), + ((10, 10), (0, 1, 2)), + ((10, 10), (1, 2, 3)), + ((10, 10), (1, 2, 2)), + ((S, S, S), (2, 3, 2)), + ) + + for shape, arguments in 
test_cases: + yield SampleInput(make_tensor(shape, dtype=dtype, device=device, + low=None, high=None, + requires_grad=requires_grad), + *arguments) + +def sample_inputs_split(op_info, device, dtype, requires_grad, *, list_args=False, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + if list_args: + cases = ( + ((S, S, S), (torch.Size([int(S / 3), S - int(S / 3) * 2, int(S / 3)]),)), + ((S, S, S), (torch.Size([int(S / 2), S - int(S / 2) * 2, int(S / 2)]), 2),), + ((S, S, S), (torch.Size([int(S / 2), S - int(S / 2) * 2, int(S / 2)]), -2),) + ) + else: + cases = ( # type: ignore[assignment] + ((S, S, S), (2,)), + ((S, S, S), (S, 1)), + ) + + for shape, args in cases: + yield SampleInput(make_arg(shape), args=args) + + +def sample_inputs_split_with_sizes(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + cases = (((S, S, S), (torch.Size([int(S / 3), S - int(S / 3) * 2, int(S / 3)]),)), + ((S, S, S), (torch.Size([int(S / 3), S - int(S / 3), 0]),)), + ((S, S, S), (torch.Size([int(S / 3), S - int(S / 3) * 2, int(S / 3)]), 2)), + ((S, S, S), (torch.Size([int(S / 3), S - int(S / 3) * 2, int(S / 3)]), -2)), + ) + + for shape, args in cases: + yield SampleInput(make_arg(shape), args=args) + + +def sample_inputs_msort(op_info, device, dtype, requires_grad, **kwargs): + def apply_grad(t): + if dtype in floating_types_and(torch.float16, torch.bfloat16): + t.requires_grad_(requires_grad) + + def large_1d_unique(dtype, device): + res = torch.randperm(L * L * L, dtype=torch.int64, device=device) + res = res.to(dtype) + apply_grad(res) + return res + + # Test case for large tensor. 
+ yield SampleInput(large_1d_unique(dtype, device)) + + yield SampleInput(make_tensor((S, M, S), dtype=dtype, device=device, + low=None, high=None, + requires_grad=requires_grad)) + +def sample_inputs_lerp(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + # no broadcast + yield SampleInput(make_arg((S, S)), make_arg((S, S)), 0.4) + # broadcast rhs + yield SampleInput(make_arg((S, S)), make_arg((S,)), 0.4) + # scalar tensor + yield SampleInput(make_arg(()), make_arg(()), 0.4) + # broadcast rhs scalar-tensor + yield SampleInput(make_arg((S, S)), make_arg(()), 0.4) + # broadcast rhs with weight tensor + yield SampleInput(make_arg((S, S)), make_arg((S,)), make_arg((S, S))) + # broadcast rhs and weight tensor + yield SampleInput(make_arg((S, S)), make_arg((S, 1)), make_arg((S,))) + # broadcast lhs + yield SampleInput(make_arg((S,)), make_arg((S, S)), 0.4).with_metadata(broadcasts_input=True) + # scalar broadcast_lhs + yield SampleInput(make_arg(()), make_arg((S, S)), 0.4).with_metadata(broadcasts_input=True) + # broadcast all + yield SampleInput(make_arg((S, 1)), make_arg((S, S)), 0.4).with_metadata(broadcasts_input=True) + # tensor broadcast all + yield SampleInput(make_arg((S, 1)), make_arg((S, S)), make_arg((S, 1))).with_metadata( + broadcasts_input=True) + # no broadcast with weight tensor + yield SampleInput(make_arg((S, S)), make_arg((S, S)), make_arg((S, S))) + # broadcast lhs with weight tensor + yield SampleInput(make_arg((S,)), make_arg((S, S)), make_arg((S, S))).with_metadata( + broadcasts_input=True) + # broadcast lhs and weight tensor + yield SampleInput(make_arg((S,)), make_arg((S, S, S)), make_arg((S, S))).with_metadata( + broadcasts_input=True) + # broadcast lhs and weight tensor variant + yield SampleInput(make_arg((S, S)), make_arg((S, S, S)), make_arg((S,))).with_metadata( + broadcasts_input=True) + + if dtype.is_complex: + # no broadcast + yield 
SampleInput(make_arg((S, S)), make_arg((S, S)), 0.4j) + yield SampleInput(make_arg((S, S)), make_arg((S, S)), 1.2 + 0.1j) + # broadcast rhs + yield SampleInput(make_arg((S, S)), make_arg((S,)), 0.4j) + yield SampleInput(make_arg((S, S)), make_arg((S, S)), 5.4 + 9j) + # scalar tensor + yield SampleInput(make_arg(()), make_arg(()), 0.4j) + yield SampleInput(make_arg(()), make_arg(()), 6.1 + 0.004j) + # broadcast rhs scalar-tensor + yield SampleInput(make_arg((S, S)), make_arg(()), 0.4j) + yield SampleInput(make_arg((S, S)), make_arg(()), 1 + 2j) + +def sample_inputs_tensordot(self, device, dtype, requires_grad, **kwargs): + cases = ( + ((2, 2, 2), (2, 2, 2), (2)), + ((2, 2, 1), (2, 1, 2), ([0, 1], [2, 0])), + ) + for first_shape, second_shape, dims in cases: + yield SampleInput(make_tensor(first_shape, dtype=dtype, device=device, + requires_grad=requires_grad), + make_tensor(second_shape, dtype=dtype, device=device, + requires_grad=requires_grad), + dims=dims) + +def sample_inputs_kron(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial( + make_tensor, dtype=dtype, device=device, requires_grad=requires_grad, low=None, high=None) + test_cases = ( + ((S, S), (M, L)), + ) + + for input_shape, other_shape in test_cases: + input = make_arg(input_shape) + other = make_arg(other_shape) + yield SampleInput(input, other) + +def sample_inputs_inner(self, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + yield SampleInput(make_arg(S), make_arg(S)) + yield SampleInput(make_arg(), make_arg(S, S)) + +def sample_inputs_scatter(op_info, device, dtype, requires_grad, **kwargs): + def _tensor(shape, dtype=dtype, low=None, high=None): + return make_tensor(shape, dtype=dtype, device=device, low=low, high=high, requires_grad=requires_grad) + + def _gather(shape, index_dim, max_indices): + return gather_variable(shape, index_dim, max_indices, device=device) + + zero = torch.tensor(0, 
dtype=torch.long, device=device) + test_cases = ( + (_tensor((M, S)), (0, _gather((S, S), 1, M), _tensor((S, S)))), + (_tensor((M, S)), (1, _gather((S, S), 0, S), _tensor((S, S)))), + (_tensor((M, S)), (-1, _gather((S, S), 0, S), _tensor((S, S)))), + (_tensor((M, S)), (0, _gather((M, S // 2), 1, M), _tensor((M, S // 2)))), + (_tensor((M, S)), (1, _gather((M, S // 2), 0, S), _tensor((M, S // 2)))), + (_tensor((M, S)), (-1, _gather((M, S // 2), 0, S), _tensor((M, S // 2)))), + (_tensor(()), (0, zero.clone().detach(), _tensor(()))), + (_tensor(()), (0, zero.clone().detach(), 2.5)), + ) + + for tensor, args in test_cases: + yield SampleInput(tensor, *args) + + if not requires_grad: + yield SampleInput(tensor.clone().detach(), *args, reduce='add') + + if dtype.is_floating_point: + yield SampleInput(tensor.clone().detach(), *args, reduce='multiply') + +def sample_inputs_scatter_add(op_info, device, dtype, requires_grad, **kwargs): + def _tensor(shape, dtype=dtype, low=None, high=None): + return make_tensor(shape, dtype=dtype, device=device, low=low, high=high, requires_grad=requires_grad) + + def _gather(shape, index_dim, max_indices): + return gather_variable(shape, index_dim, max_indices, device=device) + + zero = torch.tensor(0, dtype=torch.long, device=device) + yield SampleInput(_tensor((M, S)), 0, _gather((S, S), 1, M), _tensor((S, S))) + yield SampleInput(_tensor((M, S)), 1, _gather((S, S), 0, S), _tensor((S, S))) + yield SampleInput(_tensor((M, S)), -1, _gather((S, S), 0, S), _tensor((S, S))) + yield SampleInput(_tensor((M, S)), 0, _gather((M, S // 2), 1, M), _tensor((M, S // 2))) + yield SampleInput(_tensor((M, S)), 1, _gather((M, S // 2), 0, S), _tensor((M, S // 2))) + yield SampleInput(_tensor((M, S)), -1, _gather((M, S // 2), 0, S), _tensor((M, S // 2))) + yield SampleInput(_tensor(()), 0, zero.clone().detach(), _tensor(())) + +def sample_inputs_scatter_reduce(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, 
device=device, requires_grad=requires_grad) + gather = partial(gather_variable, device=device) + + zero = torch.tensor(0, dtype=torch.long, device=device) + test_cases = ( + ((M, S), 0, gather((S, S), 1, M), (S, S)), + ((M, S), 1, gather((S, S), 0, S), (S, S)), + ((M, S), -1, gather((S, S), 0, S), (S, S)), + ((M, S), 0, gather((M, S // 2), 1, M), (M, S // 2)), + ((M, S), 1, gather((M, S // 2), 0, S), (M, S // 2)), + ((M, S), -1, gather((M, S // 2), 0, S), (M, S // 2)), + ((), 0, zero.clone().detach(), ()), + ) + + reduce = op_info.variant_test_name + for (inp_shape, dim, index, src_shape), include_self in product(test_cases, [False, True, False]): + yield SampleInput(make_arg(inp_shape), + args=(dim, index, make_arg(src_shape), reduce), + kwargs={'include_self': include_self}) + + + # Sample inputs to test edge cases for backward + # Check that gradients are propagated correctly for prod when zeros in self/src are reduced + if requires_grad and reduce == 'prod': + # This sample tests gradients for the following cases + # (a) 1 zero reduced (from src (self[0, 1], self[1, 1]), from self (self[0, 0], self[2, 0])) + # (b) 2 zeros reduced (1 from src and 1 from self (self[1, 0]) + # (c) no zeros reduced (self([2, 1])) + # (d) 2 zeros reduced (both from src) is tested in test/test_autograd.py + # test_scatter_index_reduce_prod_gradgrad_error as this case is not supported for gradgrad + input = torch.tensor([[0, 13], [0, 17], [0, 19]], dtype=dtype, device=device, requires_grad=requires_grad) + src = torch.tensor([[0, 1, 2, 3], [0, 4, 0, 1], [2, 3, 5, 6]], dtype=dtype, device=device, requires_grad=requires_grad) + idx = torch.tensor([[1, 1, 0, 0], [0, 0, 1, 1], [0, 0, 0, 1]], dtype=torch.long, device=device) + + yield SampleInput(input, + args=(1, idx, src, reduce), + kwargs={'include_self': True}) + +def sample_inputs_segment_reduce(op_info, device, dtype, requires_grad, *, mode='lengths', **kwargs): + def _tensor(shape, dtype=dtype, low=None, high=None): + return 
make_tensor(shape, dtype=dtype, device=device, low=low, high=high, requires_grad=requires_grad) + + zero = torch.tensor(0, dtype=torch.long, device=device) + test_cases = ( + # inp_shape, dim, lengths, unsafe + ((S,), 0, [0, 1, 2, 2], False), + ((S,), 0, [0, 1, 2, 2], True), + ((S,), 0, [2, 0, 3, 0], False), + ((S, S), 0, [0, 1, 2, 2], False), + # test when lengths do not sum to dim size + ((M, S, S), 0, [1, 2, 0, 6, 0], True), + # test for higher dimensions + ((S, S), 1, [[0, 1, 2, 2] for _ in range(S)], False), + ((S, S), 1, [[2, 0, 3, 0], [0, 1, 2, 2], [3, 0, 2, 0], [1, 1, 1, 2], [0, 1, 2, 2]], False), + ((S, S, S), 1, [[0, 1, 2, 2] for _ in range(S)], False), + ((S, S, S), 1, [[2, 0, 3, 0], [0, 1, 2, 2], [3, 0, 2, 0], [1, 1, 1, 2], [0, 1, 2, 2]], False), + ) + + reductions = ["max", "mean", "min", "sum", "prod"] + for args, reduce, initial in product(test_cases, reductions, [1, 2]): + inp_shape, dim, lengths, unsafe = args + lengths_t = torch.tensor(lengths, dtype=torch.long, device=device) + sample_input_kwargs = {'axis': dim, 'unsafe': unsafe, 'initial': initial} + if mode == 'lengths': + sample_input_kwargs['lengths'] = lengths_t + elif mode == 'offsets': + zeros_shape = list(lengths_t.shape) + zeros_shape[dim] = 1 + offsets_t = torch.cat((lengths_t.new_zeros(zeros_shape), lengths_t), dim).cumsum_(dim) + sample_input_kwargs['offsets'] = offsets_t + else: + raise RuntimeError(f"mode most be one of 'offsets' or 'lengths' got '{mode}'.") + yield SampleInput(_tensor(inp_shape), + args=(reduce,), + kwargs=sample_input_kwargs) + + +def sample_inputs_ravel(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, + low=None, high=None, requires_grad=requires_grad) + yield SampleInput(make_arg((S, S, S))) + yield SampleInput(make_arg(())) + yield SampleInput(make_arg((S, S, S), noncontiguous=True)) + +def sample_inputs_unravel_index(op_info, device, dtype, requires_grad, **kwargs): + make_arg = 
partial(make_tensor, dtype=dtype, device=device, + low=None, high=None, requires_grad=requires_grad) + yield SampleInput( + torch.tensor( + [[3, 8, 13], [0, 5, 10]], + device=device, + dtype=dtype), + (4, 5)) + yield SampleInput( + torch.tensor([[3, 8, 13], [0, 5, 10]], device=device, dtype=dtype), + (4, 2**30)) + yield SampleInput( + torch.tensor([[3, 8, 13], [0, 5, 10]], device=device, dtype=dtype), + (2**30, 4)) + yield SampleInput( + torch.tensor(2, device=device, dtype=dtype), + (2, 2)) + max_val = 2**(8 * dtype.itemsize - (1 if dtype.is_signed else 0)) - 1 + yield SampleInput( + torch.tensor(max_val - 1, device=device, dtype=dtype), + (1, max_val)) + yield SampleInput( + torch.tensor([22, 41, 37], device=device, dtype=dtype), + (7, 6)) + yield SampleInput( + torch.tensor(min(1621, max_val), device=device, dtype=dtype), + (6, 7, 8, 9)) + yield SampleInput( + torch.tensor([], device=device, dtype=dtype), + (10, 3, 5)) + yield SampleInput( + torch.tensor( + [[1, 0, 1, 2, 3, 4], [1, 6, 1, 3, 2, 0]], + device=device, + dtype=dtype), + (5, 8)) + yield SampleInput( + torch.tensor( + [[1, 0, 1, 2, 3, 4], [1, 6, 1, 3, 2, 0], [1, 3, 1, 0, 9, 5]], + device=device, + dtype=dtype), + (5, 8, 10)) + yield SampleInput( + torch.tensor(0, device=device, dtype=dtype), + ()) + + a = np.array([[2, 4, 5, 6], [7, 8, 1, 15]]) + b = np.array([[3, 2, 7, 6], [10, 12, 8, 9]]) + _, i1, i2 = np.intersect1d(a, b, assume_unique=True, return_indices=True) + yield SampleInput(torch.tensor(i1, device=device, dtype=dtype), a.shape) + yield SampleInput(torch.tensor(i2, device=device, dtype=dtype), b.shape) + + a = np.array([[2, 4, 5, 6, 6], [4, 7, 8, 7, 2]]) + b = np.array([[3, 2, 7, 7], [10, 12, 8, 7]]) + _, i1, i2 = np.intersect1d(a, b, return_indices=True) + yield SampleInput(torch.tensor(i1, device=device, dtype=dtype), a.shape) + yield SampleInput(torch.tensor(i2, device=device, dtype=dtype), b.shape) + + +def sample_inputs_tril_triu(op_info, device, dtype, requires_grad, **kwargs): + 
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
    # (shape, args) pairs; args is the optional diagonal offset passed to tril/triu
    cases = (((M, M), ()),
             ((M, M), (2,),),
             ((M, S), ()),
             ((M, S), (-1,)),
             ((M, M), (2,),),
             ((S, M, S), ()),
             ((S, M, S), (2,)),
             ((3, 3, S, S), ()),)

    for shape, args in cases:
        yield SampleInput(make_arg(shape), args=args)

def error_inputs_tril_triu(opinfo, device, **kwargs):
    """Yield ErrorInputs exercising tril/triu's minimum-dimensionality check."""
    make_arg = partial(make_tensor, device=device, dtype=torch.float32)

    # error inputs for input.ndim <= 2
    yield ErrorInput(SampleInput(make_arg((4,))), error_regex="input tensor must have at least 2 dimensions")

def sample_inputs_trilu_indices(op_info, device, dtype, requires_grad, **kwargs):
    """Yield SampleInputs for tril_indices/triu_indices; first arg is `row`, rest are `col`/`offset`."""
    # (row, col, offset)
    args_list = ((0, 0),
                 (20, 0),
                 (0, 20),
                 (20, 21, 0),
                 (20, 21, 7),
                 (20, 21, -7),
                 # Large test cases below are deliberately commented out to speed up CI
                 # tests and to avoid OOM error. When modifying implementations of
                 # tril_indices and triu_indices, please enable these tests and make sure
                 # they pass.
                 # (2, 68435455, 3),
                 # (5000, 5000),
                 # (5000, 5000, 1234),
                 # (5000, 5000, -1233),
                 )
    for args in args_list:
        yield SampleInput(args[0], args=args[1:], kwargs={"dtype": dtype, "device": device})

def sample_inputs_clone_contiguous(op_info, device, dtype, requires_grad, **kwargs):
    """Yield basic SampleInputs (3-D and scalar tensors) shared by clone and contiguous."""
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)

    yield SampleInput(make_arg((S, M, S)))
    yield SampleInput(make_arg(()))

def reference_inputs_clone_contiguous(op, device, dtype, requires_grad, **kwargs):
    """Extend the clone/contiguous samples with size-1/size-0 dims, transposes and memory formats."""
    # NOTE: the default memory format for clone is torch.preserve_format, for contiguous it's torch.contiguous_format
    # This exploits that default to test torch.preserve_format for clone, without causing an error when testing contiguous
    yield from sample_inputs_clone_contiguous(op, device, dtype, requires_grad, **kwargs)

    shapes = (
        (3, 5, 6),
        (1, 1, 3, 5, 6),
        (1, 1, 3, 5, 6, 1, 1),
        (1, 0, 3, 5, 0, 2),
        (1, 0, 3, 5, 0, 0, 1, 1, 2),
        (),
    )

    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
    for shape in shapes:
        yield SampleInput(make_arg(shape))
        yield SampleInput(make_arg(shape).transpose(0, -1))
        yield SampleInput(make_arg(shape, noncontiguous=True))
        yield SampleInput(make_arg(shape, noncontiguous=True).transpose(0, -1))

        yield SampleInput(make_arg(shape), kwargs={'memory_format': torch.contiguous_format})
        yield SampleInput(make_arg(shape).transpose(0, -1), kwargs={'memory_format': torch.contiguous_format})
        yield SampleInput(make_arg(shape, noncontiguous=True), kwargs={'memory_format': torch.contiguous_format})
        yield SampleInput(make_arg(shape, noncontiguous=True).transpose(0, -1), kwargs={'memory_format': torch.contiguous_format})

    # shape, strides, offset  (fed to as_strided over a flat 500-element buffer below)
    strided_cases = (
        ((5, 6, 2), (1, 1, 7), 2),
        ((5, 5, 4), (1, 1, 7), 2),
        ((5, 5, 2), (4, 5, 7), 3),
        ((5, 5, 2), (5, 5, 7), 3),
        ((5, 5, 2), (5, 5, 5), 3),
        ((9, 5, 2), (0, 1, 7), 3),
    )
    for shape, strides, offset in strided_cases:
        yield SampleInput(make_arg(500,).as_strided(shape, strides, offset))
        yield SampleInput(make_arg(500,).as_strided(shape, strides, offset), kwargs={'memory_format': torch.contiguous_format})

    # channels last 2D
    yield SampleInput(make_arg((2, 2, 2, 2)), kwargs={'memory_format': torch.channels_last})
    a = make_arg((2, 2, 2, 2)).permute(0, 3, 1, 2)
    yield SampleInput(a, kwargs={'memory_format': torch.channels_last})

    # channels last 3D
    yield SampleInput(make_arg((2, 2, 2, 2, 2)), kwargs={'memory_format': torch.channels_last_3d})
    a = make_arg((2, 2, 2, 2, 2)).permute(0, 4, 1, 2, 3)
    yield SampleInput(a, kwargs={'memory_format': torch.channels_last_3d})


def sample_inputs_sum_to_size(op_info, device, dtype, requires_grad, **kwargs):
    """Yield SampleInputs for sum_to_size, passing target shapes as tuples, lists, and unpacked ints."""
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)

    # list of tuples (shape, shape) defining the shapes of the input and output tensors
    sample_shapes = [
        ((), ()),
        ((S,), (1,)),
        ((S, S), (1, 1)),
        ((S, S), (1, S)),
        ((S, S), (S, S)),
        ((S, S, S), (S, 1, S)),
    ]

    for input_shape, output_shape in sample_shapes:
        yield SampleInput(make_arg(input_shape), args=(output_shape,))
        # the scalar () shape cannot be spelled as a list/unpacked form
        if output_shape == ():
            continue
        yield SampleInput(make_arg(input_shape), args=(list(output_shape),))
        yield SampleInput(make_arg(input_shape), args=(*output_shape,))


def error_inputs_sum_to_size(op_info, device, **kwargs):
    """Yield ErrorInputs where the requested size is not expandable to the input."""
    shape = (M, S, M)
    err_msg = "is not expandable to size"
    si = SampleInput(make_tensor(shape, device=device, dtype=torch.float32), args=(M, M))
    yield ErrorInput(si, error_regex=err_msg)

    shape = (M + 1, S, S, M)
    err_msg = "is not expandable to size"
    si = SampleInput(make_tensor(shape, device=device, dtype=torch.float32), args=(M + 1, 1))
    yield ErrorInput(si, error_regex=err_msg)


def sample_inputs_resize_ops(op_info, device, dtype, requires_grad, **kwargs):
    """Yield SampleInputs for resize_/resize_as_; the second argument's form depends on op_info.name."""
    make_arg = partial(make_tensor,
                       dtype=dtype, device=device)
    cases = (((S, S, S), (S * S, S)),
             ((), ()),
             ((), (1, 1, 1)),
             )

    for shape, args_or_shape in cases:
        # Update `args` based on operator
        if op_info.name == 'resize_':
            # resize_ takes shape/tuple of ints,
            args = (args_or_shape, )
        elif op_info.name == 'resize_as_':
            # resize_as_ takes another tensor
            args = (make_arg(shape, requires_grad=False), )  # type:ignore[assignment]
        else:
            raise ValueError("sample_inputs_resize_ops is being used with incorrect operator")

        yield SampleInput(make_arg(shape, requires_grad=requires_grad), args=args)

def sample_inputs_view_reshape(op_info, device, dtype, requires_grad, **kwargs):
    """Yield SampleInputs for view/reshape; kwargs['tensor_arg'] switches the target shape to a tensor."""
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)

    cases = (
        # a, b, is_tensor_supported
        ((S, S, S), (S * S, S), True),
        ((S * S, S), (S, S, S), True),
        ((S * S, S), (S, -1, S), False),  # neg index
        ((S * S * 2, S), (S, -1), False),  # neg index
        ((S,), (S,), True),
        ((), (), False),  # empty
        ((), (1,), True),
    )

    for a, b, is_tensor_supported in cases:
        # skip unsupported cases
        if kwargs.get("tensor_arg") and not is_tensor_supported:
            continue

        # convert to tensor
        if kwargs.get("tensor_arg"):
            b = make_arg(b, requires_grad=False)

        yield SampleInput(make_arg(a), args=(b,))

def reference_inputs_view_reshape(op, device, dtype, requires_grad, **kwargs):
    """Extend the view/reshape samples with many round-trippable shape pairs (yielded in both directions)."""
    yield from sample_inputs_view_reshape(op, device, dtype, requires_grad, **kwargs)

    cases = (
        # a, b, is_tensor_supported
        ((125,), (25, 5), True),
        ((25, 25), (1, 5, 5, 1, 5, 1, 5, 1), True),
        ((16, 32), (2, 4, 1, 4, 4, 1, 4), True),
        ((16, 12), (12, 16), True),
        ((1, 16, 12), (12, 16), True),
        ((1, 5, 1, 5), (25, 1), True),
        ((2, 4, 2), (4, 4), True),
        ((1, 4), (1, 1, 2, 1, 2), True),
        ((3, 5, 7), (7, 5, 3), True),
        ((1,), (), False),  # empty
        ((5, 0, 2, 3), (5, 0, 2, 3), True),
        ((2, 1, 0, 3, 1), (5, 0), True),
        ((1,), (), False),  # empty
        ((4, 5, 6), (4, 5, 6, 1, 1, 1), True),
1, 1, 1), True), + ((), (1, 1, 1, 1), False), # empty + ) + + irreversible_cases = ( + ((), (-1,), False), # neg index, empty + ((4, 7, 9, 1, 1), (1, 4, 3, -1, 1), False), # neg index + ) + + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + for a, b, is_tensor_supported in cases: + # skip unsupported cases + if kwargs.get("tensor_arg") and not is_tensor_supported: + continue + + if kwargs.get("tensor_arg"): + # convert to tensor + yield SampleInput(make_arg(a), args=(make_arg(b, requires_grad=False),)) + yield SampleInput(make_arg(b), args=(make_arg(a, requires_grad=False),)) + else: + yield SampleInput(make_arg(a), args=(b,)) + yield SampleInput(make_arg(b), args=(a,)) + + for a, b, is_tensor_supported in irreversible_cases: + # skip unsupported cases + if kwargs.get("tensor_arg") and not is_tensor_supported: + continue + + # convert to tensor + if kwargs.get("tensor_arg"): + b = make_arg(b, requires_grad=False) + + yield SampleInput(make_arg(a), args=(b,)) + +def error_inputs_view_reshape(op, device, **kwargs): + + cases = ( + # a, b, is_tensor_supported + # Reshape to different numel + ((2,), (), False), # empty + ((1, 3, 0), (), False), # empty + ((4, 3), (4, 2), True), + ((1, 3, 5), (5, 2, 2), True), + # No valid inference + ((1, 3, 5), (5, -1, 2), False), # neg index + # Two inferred shapes + ((1, 3, 5), (5, -1, -1), False), # neg index + ((1), (0, -1), False), # neg index + ((0, 5), (0, -1), False), # neg index + ) + + make_arg = partial(make_tensor, dtype=torch.float32, device=device, requires_grad=False) + for a, b, is_tensor_supported in cases: + # skip unsupported cases + if kwargs.get("tensor_arg") and not is_tensor_supported: + continue + + if b == (5, -1, -1): + error_regex = "only one dimension can be inferred" + elif a == (0, 5): + error_regex = (r"cannot reshape tensor of 0 elements into shape " + r"\[0, -1\] because the unspecified dimension size " + r"-1 can be any value and is ambiguous") + else: + # to 
avoid having issues with a regex + shape = ', '.join(map(str, b)) + size = a if type(a) is int else functools.reduce(operator.mul, a, 1) + error_regex = rf"shape '\[{shape}\]' is invalid for input of size {size}" + + # convert to tensor + if kwargs.get("tensor_arg"): + b = make_arg(b, requires_grad=False) + + yield ErrorInput(SampleInput(make_arg(a), args=(b,)), error_type=Exception, + error_regex=error_regex) + + +def sample_inputs_atleast1d2d3d(op_info, device, dtype, requires_grad, **kwargs): + input_list = [] + shapes = ((S, S, S, S), (S, S, S), (S, S), (S, ), (),) + make_tensor_partial = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + for shape in shapes: + yield SampleInput(make_tensor_partial(shape)) + yield SampleInput([make_tensor_partial(shape) for shape in shapes]) + +def sample_inputs_column_stack(op_info, device, dtype, requires_grad, **kwargs): + cases: Tuple[tuple, tuple] = ( # type: ignore[assignment] + ((S, 2, 1), (S, 3, 1)), + ((S), (S, 5)), ((), (1, S)) + ) + make_tensor_partial = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + for shape1, shape2 in cases: + yield SampleInput([make_tensor_partial(shape1), make_tensor_partial(shape2)]) + +def sample_inputs_flatten(op_info, device, dtype, requires_grad, **kwargs): + shapes = ((S, S, S), (S, S), (S, ), (),) + make_tensor_partial = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + for shape in shapes: + yield SampleInput(make_tensor_partial(shape)) + if len(shape) > 1: + yield SampleInput(make_tensor_partial(shape), start_dim=1, end_dim=-1) + +def reference_inputs_flatten(op, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_flatten(op, device, dtype, requires_grad, **kwargs) + + # shape x start_dim x end_dim + cases = ( + ((5, 4, 0, 1, 3, 7), 1, 3), + ((5, 4, 0, 1, 3, 7), 4, 5), + ((5, 4, 1, 1, 3, 7), 2, 3), + ((), 0, -1), + ((1,), 0, -1), + ((3, 7, 5), 1, 2), + ((4, 5), 1, 1), + ((1, 5, 
5, 1, 5, 1, 5, 1), 0, 2), + ((1, 5, 5, 1, 5, 1, 5, 1), 3, -1), + ((1, 5, 5, 1, 5, 7, 5, 1), -2, -1), + ((2, 4, 2), 0, 1), + ((4, 2, 2), 1, 2), + ((0, 3, 4, 5), 1, 3), + ) + + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + for shape, start, end in cases: + yield SampleInput(make_arg(shape), args=(start, end,)) + yield SampleInput(make_arg(shape, noncontiguous=True).transpose(0, -1), args=(start, end,)) + yield SampleInput(make_arg(shape).transpose(0, -1), args=(start, end,)) + +def sample_inputs_unflatten(op_info, device, dtype, requires_grad, **kwargs): + # in_shape, dim, sizes + args = (((8,), 0, (8,)), + ((8,), 0, (4, 2)), + ((8,), -1, (2, 2, 2)), + ((8,), -1, (-1, 2)), + ((3, 6, 2), 1, (2, 3)), + ((3, 6, 2), -2, (2, 3)), + ((3, 6, 2), -2, (-1, 3)), + ((3, 2, 12), 2, (3, 2, 2)), + ((4, 0), 0, (2, 2)), + ((4, 0), 1, (2, 0, 0, 0)), + ) + make_tensor_partial = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + for in_shape, dim, sizes in args: + yield SampleInput(make_tensor_partial(in_shape), args=(dim, sizes)) + + +def sample_inputs_select(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + cases = (((S, S, S), (1, 2)), + ((S, S, S), (-1, 2)), + ((S, S, S), (-1, -1)), + ((S, S, S), (1, -1)), + ((S,), (0, 2)) + ) + + for shape, args in cases: + yield SampleInput(make_arg(shape), args=args) + + +def sample_inputs_select_scatter(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + cases = (((S, S, S), (S, S), (1, 2)), + ((S, S, S), (S, S), (-1, 2)), + ((S, S, S), (S, S), (-1, -1)), + ((S, S, S), (S, S), (1, -1)), + ((S,), (), (0, 2)) + ) + + for input_shape, src_shape, args in cases: + input_ = make_arg(input_shape) + src = make_arg(src_shape) + yield SampleInput(input_, args=(src, *args)) + + +def 
    # (input_shape, src_shape, (dim, start, end, step))
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)

    cases = (((L, L, L), (L, L, L,), (0, 0, L, 1)),
             ((L, L, L), (L // 2, L, L,), (0, L // 2, L, 1)),
             ((L, L, L), (L // 4, L, L,), (0, L // 2, L, 2)),
             ((L, L, L), (L, L, L,), (1, 0, L, 1)),
             ((L, L, L), (L, L // 2, L,), (1, L // 2, L, 1)),
             ((L, L, L), (L, L // 4, L,), (1, L // 2, L, 2)),
             ((L, L, L), (L, L, L,), (2, 0, L, 1)),
             ((L, L, L), (L, L, L // 2,), (2, L // 2, L, 1)),
             ((L, L, L), (L, L, L // 4,), (2, L // 2, L, 2)),
             )

    for input_shape, src_shape, args in cases:
        input_ = make_arg(input_shape)
        src = make_arg(src_shape)
        yield SampleInput(input_, args=(src, *args))

def sample_inputs_expand(op_info, device, dtype, requires_grad, **kwargs):
    """Yield SampleInputs for expand, including -1 (keep-size) entries and scalar inputs."""
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)

    cases = (((S, 1, 1), (S, S, S)),
             ((S, 1, S), (S, S, S)),
             ((S, 1, S), (-1, S, -1)),
             ((S, 1, S), (-1, S, S)),
             ((S, 1), (S, S, S)),
             ((1,), (S, S, S)),
             ((1, S), (1, 1, S)),
             ((), ()),
             ((), (1, 3, 2)),
             )

    for case in cases:
        shape, args = case
        yield SampleInput(make_arg(shape), args=(args,))

def sample_inputs_conversion(op_info, device, dtype, requires_grad, **kwargs):
    """Yield SampleInputs for dtype-conversion ops with optional memory_format kwargs."""
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)

    shapes = ((),
              (2, 3))
    memory_format_options = [None, torch.contiguous_format]

    for shape, memory_format in itertools.product(shapes, memory_format_options):
        yield SampleInput(make_arg(shape),
                          kwargs={'memory_format': memory_format} if memory_format else {})
    # channels_last needs a 4-D input
    yield SampleInput(make_arg((2, 3, 2, 3)), kwargs={'memory_format': torch.channels_last})

def sample_inputs_byte(op_info, device, dtype, requires_grad, **kwargs):
    """Like sample_inputs_conversion, but restricted to values in [0, 255] for byte conversion."""
    make_arg = partial(make_tensor, dtype=dtype, device=device, low=0, high=255, requires_grad=requires_grad)

    shapes = ((),
              (2, 3))
    memory_format_options = [None, torch.contiguous_format]

    for shape, memory_format in itertools.product(shapes, memory_format_options):
        yield SampleInput(make_arg(shape),
                          kwargs={'memory_format': memory_format} if memory_format else {})
    yield SampleInput(make_arg((2, 3, 2, 3)), kwargs={'memory_format': torch.channels_last})

def sample_inputs_expand_as(op_info, device, dtype, requires_grad, **kwargs):
    """Yield SampleInputs for expand_as; the target tensor never requires grad."""
    make_arg = partial(make_tensor, dtype=dtype, device=device)

    cases = (((S, 1, 1), (S, S, S)),
             ((), ()),
             ((), (1, 1)),
             )

    for shape, shape_other in cases:
        yield SampleInput(make_arg(shape, requires_grad=requires_grad),
                          args=(make_arg(shape_other, requires_grad=False),))


def sample_inputs_where(op_info, device, dtype, requires_grad, **kwargs):
    """Yield SampleInputs for where(cond, x, y) with broadcasting shape combinations."""
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)

    def make_bool_mask(shape):
        # Make sure at least one element is nonzero,
        # except for empty tensor
        mask_t = make_tensor(shape, dtype=torch.bool, device=device, requires_grad=False)

        if mask_t.numel() == 0:
            return mask_t
        elif mask_t.numel() == 1:
            mask_t.fill_(True)
            return mask_t

        if mask_t.sum() == 0:
            def random_index(shape):
                return tuple(random.randrange(0, max_idx) for max_idx in shape)

            mask_t[random_index(mask_t.shape)] = True
            return mask_t

        return mask_t

    # (self_shape, mask_shape, other_shape, broadcasts_input)
    cases = (((M, M), (M, M), (M, M), False),
             ((M, 1, M), (M, M), (M, M, 1), True),
             ((), (), (), False),
             ((M, 1, M), (), (M, M, 1), True),
             ((), (M, M), (), True),
             ((), (2), (1, 1), True),
             )

    for shape, mask_shape, other_shape, broadcasts_input in cases:
        yield SampleInput(make_arg(shape),
                          args=(make_bool_mask(mask_shape), make_arg(other_shape)),
                          broadcasts_input=broadcasts_input)

# TODO: add reference inputs for where(condition) signature
def reference_inputs_where(op, device, dtype, requires_grad, **kwargs):
    """Extend the where samples with noncontiguous, type-promoting, scalar and NaN cases."""
    yield from sample_inputs_where(op, device, dtype, requires_grad, **kwargs)
    make_cond = partial(make_tensor, dtype=torch.bool, device=device, requires_grad=requires_grad)
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)

    # noncontiguous
    c = make_cond((10, 3), noncontiguous=True)
    a = make_arg((10, 1), noncontiguous=True)
    b = make_arg((3, 10, 3)).transpose(0, -1)

    # NOTE that the OpInfo for where takes samples of the form a, cond, b
    yield SampleInput(a, args=(c, b))

    # type promoting
    # NOTE(review): `other_dtype` is computed but never used below — presumably it was
    # intended as the dtype of `a` instead of the hard-coded torch.long; verify upstream.
    other_dtype = torch.double if dtype is not torch.double else torch.long
    c = make_cond((10, 3), noncontiguous=True)
    a = make_arg((10, 1), dtype=torch.long)
    b = make_arg((10, 1))

    yield SampleInput(a, args=(c, b))

    # two python scalars
    c = make_cond((10, 3), noncontiguous=True)
    a = make_arg((1,)).item()
    b = make_arg((1,)).item()

    yield SampleInput(a, args=(c, b))

    # NaN propagation
    if dtype.is_floating_point or dtype.is_complex:
        if dtype.is_floating_point:
            nan = float('nan')
        else:
            # dtype.is_complex
            nan = complex(float('nan'), float('nan'))
        c = make_cond((1, 10, 3))
        a = make_arg((10, 3), noncontiguous=True)
        a[2, 1] = nan
        b = make_arg((1, 3))
        b[0, 2] = nan

        yield SampleInput(a, args=(c, b))

    # Python scalars type promotion
    for scalar in (0, 0.0, 2j, False):
        yield SampleInput(scalar, args=(c, b))
        yield SampleInput(a, args=(c, scalar))


def error_inputs_where(op_info, device, **kwargs):
    """Yield ErrorInputs where the three tensors live on mismatched devices."""
    shape = (S,)
    err_msg = "Expected all tensors to be on the same device"
    for devices in product(('cpu', device), repeat=3):
        # only mixed-device combinations are errors
        if len(set(devices)) == 2:
            si = SampleInput(make_tensor(shape, device=devices[0], dtype=torch.float32),
                             args=(make_tensor(shape, dtype=torch.bool, device=devices[1]),
                                   make_tensor(shape, device=devices[2], dtype=torch.float32)))
            yield ErrorInput(si, error_regex=err_msg)

def sample_inputs_nonzero(op_info, device, dtype, requires_grad, **kwargs):
    """Yield SampleInputs for nonzero: all-zero and mixed inputs, with as_tuple True/False."""
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)

    sizes = ((), (S,), (S, S), (S, S, S), (S, 1, S), (S, 0, S))

    inputs = []
    for shape in sizes:
        # construct input without any non-zero elements
        zeros = torch.zeros(shape, dtype=dtype, device=device, requires_grad=requires_grad)
        inputs.append(zeros)

        # construct input with mixed zero and non-zero elements
        mixed = make_arg(shape).requires_grad_(False)
        mask_t = make_tensor(shape, dtype=torch.bool, device=device, requires_grad=False)
        mixed[mask_t] = 0
        inputs.append(mixed)

    for input_t, as_tuple in product(inputs, [False, True]):
        yield SampleInput(input_t.clone().requires_grad_(requires_grad),
                          kwargs=dict(as_tuple=as_tuple))

def sample_inputs_nonzero_static(op_info, device, dtype, requires_grad, **kwargs):
    """Yield SampleInputs for nonzero_static over several fixed output sizes."""
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)

    sizes = ((), (S,), (S, S), (S, S, S), (S, 1, S), (S, 0, S))

    inputs = []
    for shape in sizes:
        # construct input without any non-zero elements
        zeros = torch.zeros(shape, dtype=dtype, device=device, requires_grad=requires_grad)
        inputs.append(zeros)

        # construct input with mixed zero and non-zero elements
        mixed = make_arg(shape).requires_grad_(False)
        mask_t = make_tensor(shape, dtype=torch.bool, device=device, requires_grad=False)
        mixed[mask_t] = 0
        inputs.append(mixed)

    nonzero_sizes = [0, 1, XS, S, M]

    for input_t, nonzero_size in product(inputs, nonzero_sizes):
        yield SampleInput(input_t.clone().requires_grad_(requires_grad),
                          kwargs=dict(size=nonzero_size))

def sample_inputs_chunk(op_info, device, dtype, requires_grad, **kwargs):
    """Yield SampleInputs for chunk with default, positive, and negative dims."""
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)

    cases = (((S, S, S), (2,)),
             ((S, S, S), (S, 1)),
             ((S, S, S), (S, -1)))

    for case in cases:
        shape, args = case
        yield SampleInput(make_arg(shape), args=args)

def reference_inputs_chunk(op, device, dtype, requires_grad, **kwargs):
    """Extend the chunk samples with uneven splits and chunk counts larger than the dim."""
    yield from sample_inputs_chunk(op, device, dtype, requires_grad, **kwargs)
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)

    # shape x chunks x dim
    cases = (
        ((13, 9, 11), 17, -1),
        ((13, 9, 11), 11, -1),
        ((13,), 12, -1),
        ((15,), 12, -1),
        ((15,), 7, 0),
        ((15,), 9, 0),
        ((3, 7), 9, 1),
        ((3, 7), 9, 0),
        ((3, 7), 2, 0),
        ((3, 7), 3, 0),
        ((3, 7), 1, 0),
        ((3, 7), 1, 1),
        ((4, 4), 2, 0),
    )

    for shape, chunks, dim in cases:
        yield SampleInput(make_arg(shape), args=(chunks, dim))

def sample_inputs_kthvalue(op_info, device, dtype, requires_grad, **kwargs):
    """Yield SampleInputs for kthvalue: (k[, dim[, keepdim]]) argument combinations."""
    def _tensor(shape, dtype=dtype, low=None, high=None):
        return make_tensor(shape, dtype=dtype, device=device, low=low, high=high, requires_grad=requires_grad)

    test_cases = [
        ((S, S, S), (2,)),
        ((S, S, S), (2, 1,)),
        ((S, S, S), (2, -1,)),
        ((S, S, S), (2, 1, True,)),
        ((S, S, S), (2, -1, True,)),
        ((S,), (2, 0,)),
        ((S,), (2, 0, True,)),
        ((), (1,)),
        ((), (1, 0,)),
        ((), (1, 0, True)),
    ]

    yield from (SampleInput(_tensor(tensor), *args) for tensor, args in test_cases)

def error_inputs_kthvalue(op_info, device, **kwargs):
    """Yield ErrorInputs for kthvalue: overlapping out= tensors and out-of-range k."""
    # tests overlapping output fails
    t = make_tensor(10, dtype=torch.float32, device=device)
    indices = torch.empty((), device=device, dtype=torch.long)
    yield ErrorInput(SampleInput(t, 5, out=(t, indices)),
                     error_regex="unsupported operation")

    k_out_of_range_err = "selected number k out of range for dimension"
    yield ErrorInput(SampleInput(torch.randn(2, 2, device=device), 3, 0),
                     error_regex=k_out_of_range_err)
    yield ErrorInput(SampleInput(torch.randn(2, 2, device=device), 3),
                     error_regex=k_out_of_range_err)
    yield ErrorInput(SampleInput(torch.tensor(2, device=device), 3),
                     error_regex=k_out_of_range_err)

def sample_inputs_dropout(op_info, device, dtype, requires_grad, *,
                          train=None, valid_input_dim=None, **kwargs):
    """Yield SampleInputs for the dropout family over p values and training modes.

    `train` pins the training flag; `valid_input_dim` restricts input ranks.
    """
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    if valid_input_dim:
        cases = ((S,) * i for i in valid_input_dim)
    else:
        cases = ((S, S), (S,), ())
    p_vals = [0.0, 0.5, 1.0]
    # This is to handle special case for feature_alpha_dropout which has different
    # supported dtypes depending on `train` parameter
    training_vals = [train] if train is not None else [True, False]

    for case, p, training in product(cases, p_vals, training_vals):
        yield SampleInput(make_arg(case), p=p, training=training)
    # one sample with all-default kwargs (uses the last `case` from the loop)
    yield SampleInput(make_arg(case))

def sample_inputs_dropout_backward(op_info, device, dtype, requires_grad, **kwargs):
    """Yield SampleInputs for dropout's backward: (grad, bool mask, scale)."""
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    make_mask = partial(make_tensor, device=device, dtype=torch.bool, requires_grad=False)

    cases = ((S, S, S, S), (S,), ())
    scale_vals = [0.0, 1.0, 2.0]

    for case, scale in product(cases, scale_vals):
        yield SampleInput(make_arg(case), make_mask(case), scale)

def sample_inputs_embedding_bag(op_info, device, dtype, requires_grad, **kwargs):
    """Yield SampleInputs for embedding_bag across modes, offsets, and per-sample weights."""
    def make_input(shape):
        return make_tensor(shape, device=device, dtype=dtype, requires_grad=requires_grad)

    def make_long_input(shape, *, low, high, noncontiguous=False):
        return make_tensor(shape, device=device, dtype=torch.long, low=low, high=high,
                           noncontiguous=noncontiguous)

    def make_per_sample_weight(flag, idx):
        # a tensor of float / double weights, or None
        # to indicate all weights should be taken to be 1
        if flag:
            return make_input(idx.shape)
        return None

    offsets = torch.tensor([0, 3], device=device, dtype=torch.long)
    for generate_per_sample_weight in (True, False):
        for mode in ('sum', 'mean', 'max'):
            # per_sample_weights is only supported for mode='sum' (got mode='****')
            if generate_per_sample_weight and mode in ('mean', 'max'):
                continue

            # 1-D index tensor
            idx = make_long_input((S,), low=0, high=M)
            per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)
            yield SampleInput(make_input((M, S)), args=(idx,),
                              kwargs={'offsets': offsets, 'mode': mode,
                                      'per_sample_weights': per_sample_weights})

            idx = make_long_input((S,), low=0, high=M, noncontiguous=True)
            per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)
            yield SampleInput(make_input((M, S)), args=(idx,),
                              kwargs={'offsets': offsets, 'mode': mode,
                                      'per_sample_weights': per_sample_weights})

            # bag with zero length
            idx = make_long_input((S,), low=0, high=M, noncontiguous=True)
            per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)
            yield SampleInput(make_input((M, S)), args=(idx,),
                              kwargs={'offsets': torch.tensor([0, 0, 3], device=device, dtype=torch.long),
                                      'mode': mode,
                                      'per_sample_weights': per_sample_weights})

            # 2-D index tensor
            idx = make_long_input((S, S), low=0, high=M)
            per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)
            yield SampleInput(make_input((M, S)), args=(idx,),
                              kwargs={'mode': mode, 'per_sample_weights': per_sample_weights})

            idx = make_long_input((S, S), low=0, high=M, noncontiguous=True)
            per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)
            yield SampleInput(make_input((M, S)), args=(idx,),
                              kwargs={'mode': mode, 'per_sample_weights': per_sample_weights})

            # The gradient vector at `padding_idx` is not updated.
            # Negative padding_idx
            idx = make_long_input((6,), low=0, high=S)
            idx[0] = 4
            idx[4] = 4
            per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)
            yield SampleInput(make_input((S, S)), args=(idx,),
                              kwargs={'padding_idx': -1, 'offsets': offsets,
                                      'mode': mode, 'per_sample_weights': per_sample_weights},)

            idx = make_long_input((3, 3), low=0, high=S)
            # Positive padding_idx
            idx[0, 0] = 2
            idx[1, 1] = 2
            per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)
            yield SampleInput(make_input((S, S)), args=(idx,),
                              kwargs={'padding_idx': 2, 'mode': mode,
                                      'per_sample_weights': per_sample_weights},)

            idx = make_long_input((6, ), low=0, high=S)
            weights = make_input((S, S))
            offsets_ = torch.tensor([0, 3, 6], device=device, dtype=torch.long)
            per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)
            yield SampleInput(weights, args=(idx,),
                              kwargs={'mode': mode, 'offsets': offsets_, 'include_last_offset': True},)

            if not requires_grad:
                # Following inputs return different gradient from the numerical gradient.
                # This is expected and relevant tests are present in `test_nn.py`.

                # Due to inplace renorming of weight, the numerical gradient doesn't match the
                # analytical gradient.
                idx = make_long_input((2, 2), low=0, high=S)
                weights = make_input((S, S)) * 2
                per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)
                yield SampleInput(weights, args=(idx,),
                                  kwargs={'max_norm': 1., 'mode': mode,
                                          'per_sample_weights': per_sample_weights},)

                idx = make_long_input((6, ), low=0, high=S)
                weights = make_input((S, S)) * 2
                per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)
                yield SampleInput(weights, args=(idx,),
                                  kwargs={'max_norm': 1., 'norm_type': 1.0,
                                          'mode': mode, 'offsets': offsets,
                                          'per_sample_weights': per_sample_weights},)

                if mode != 'max':
                    # Scale the gradient based on the inverse frequency of a particular index.
                    # Note : max mode does not support sparse weights
                    idx = make_long_input((2, 2), low=0, high=S)
                    idx[0, 0] = 1
                    idx[0, 1] = 1
                    weights = make_input((S, S))
                    per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)
                    yield SampleInput(weights, args=(idx,),
                                      kwargs={'scale_grad_by_freq': True, 'mode': mode,
                                              'per_sample_weights': per_sample_weights},)

                    # gradcheck not implemented for sparse tensors.
                    # Note : max mode does not support sparse weights
                    idx = make_long_input((6, ), low=0, high=S)
                    weights = make_input((S, S))
                    per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)
                    yield SampleInput(weights, args=(idx,),
                                      kwargs={'sparse': True, 'offsets': offsets,
                                              'mode': mode, 'per_sample_weights': per_sample_weights})

                    idx = make_long_input((6, ), low=0, high=S)
                    idx[0] = 1  # freq more than 1
                    idx[1] = 1  # freq more than 1
                    idx[3] = 0  # padding_idx
                    weights = make_input((S, S)) * 2
                    per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)
                    yield SampleInput(weights, args=(idx,),
                                      kwargs={'sparse': True, 'scale_grad_by_freq': True, 'padding_idx': 0,
                                              'max_norm': 1., 'offsets': offsets,
                                              'mode': mode, 'per_sample_weights': per_sample_weights})


def sample_inputs_embedding(op_info, device, dtype, requires_grad, **kwargs):
    """Yield SampleInputs for embedding: 0/1/2-D index tensors, plus gradient-special cases."""
    def make_input(shape):
        return make_tensor(shape, device=device, dtype=dtype, requires_grad=requires_grad)

    def make_long_input(shape, *, low, high):
        return make_tensor(shape, device=device, dtype=torch.long, low=low, high=high)

    # 0-D index tensor
    idx = make_long_input((), low=0, high=M)
    yield SampleInput(make_input((M, S)), args=(idx,),)

    # 1-D index tensor
    idx = make_long_input((S,), low=0, high=M)
    yield SampleInput(make_input((M, S)), args=(idx,),)

    # 2-D index tensor
    idx = make_long_input((S, S), low=0, high=M)
    yield SampleInput(make_input((M, S)), args=(idx,),)

    if not requires_grad:
        # Following inputs return different gradient from the numerical gradient.
        # This is expected and relevant tests are present in `test_nn.py`.

        # The gradient vector at `padding_idx` is not updated.
        idx = make_long_input((2, 2), low=0, high=S)
        idx[0, 0] = 2
        idx[1, 1] = 2
        yield SampleInput(make_input((S, S)), args=(idx,), kwargs={'padding_idx': 2},)

        idx = make_long_input((2, 2), low=0, high=S)
        idx[0, 0] = 4
        idx[1, 1] = 4
        yield SampleInput(make_input((S, S)), args=(idx,), kwargs={'padding_idx': -1},)

        # Due to inplace renorming of weight, the numerical gradient doesn't match the
        # analytical gradient.
        idx = make_long_input((2, 2), low=0, high=S)
        weights = make_input((S, S)) * 2
        yield SampleInput(weights, args=(idx,), kwargs={'max_norm': 1.},)

        idx = make_long_input((2, 2), low=0, high=S)
        weights = make_input((S, S)) * 2
        yield SampleInput(weights, args=(idx,), kwargs={'max_norm': 1., 'norm_type': 1.0},)

        # Scale the gradient based on the inverse frequency of a particular index.
        idx = make_long_input((2, 2), low=0, high=S)
        idx[0, 0] = 1
        idx[0, 1] = 1
        weights = make_input((S, S))
        yield SampleInput(weights, args=(idx,), kwargs={'scale_grad_by_freq': True},)

        # gradcheck not implemented for sparse tensors.
        idx = make_long_input((2, 2), low=0, high=S)
        weights = make_input((S, S))
        yield SampleInput(weights, args=(idx,), kwargs={'sparse': True})

        idx = make_long_input((3, 3), low=0, high=S)
        idx[0, 0] = 1  # freq more than 1
        idx[0, 1] = 1  # freq more than 1
        idx[1, 0] = 0  # padding_idx
        weights = make_input((S, S)) * 2
        yield SampleInput(weights, args=(idx,),
                          kwargs={'sparse': True, 'scale_grad_by_freq': True,
                                  'padding_idx': 0, 'max_norm': 1.})


def sample_inputs_one_hot(op_info, device, dtype, requires_grad, **kwargs):
    """Return a generator of SampleInputs for one_hot over shapes and num_classes values."""
    def make_input(shape, *, low, high):
        return make_tensor(shape, device=device, dtype=dtype, low=low, high=high, requires_grad=requires_grad)

    shapes = ((), (S,), (L, M, S))
    num_classess = (-1, 10)

    return (
        SampleInput(
            make_input(
                shape,
                low=0,
                # keep indices in range of the (possibly inferred) class count
                high=10 if num_classes == -1 else num_classes // 2,
            ),
            kwargs=dict(num_classes=num_classes),
        )
        for shape, num_classes in itertools.product(shapes, num_classess)
    )


def sample_inputs_loss(op_info, device, dtype, requires_grad, **kwargs):
    """Yield (input, target) SampleInputs shared by the loss functions, over reduction modes."""
    rhs_requires_grad = kwargs.get('rhs_requires_grad', requires_grad)
    _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    # Although most losses also support the reduce and size_average combination instead of reduce, the former is
    # deprecated since 0.4.1 and thus is not tested
    shapes_and_kwargs = (
        ((), None),
        ((S,), dict(reduction="mean")),
        ((S,), dict(reduction="sum")),
        ((S,), dict(reduction="none")),
        ((S, S), None),
        ((S, S, S), None),
    )

    # NOTE(review): the loop variable deliberately shadows the `kwargs` parameter,
    # which is not used after this point.
    for shape, kwargs in shapes_and_kwargs:
        yield SampleInput(_make_tensor(shape),
                          args=(_make_tensor(shape, requires_grad=rhs_requires_grad),),
                          kwargs=kwargs)

def sample_inputs_grid_sample(op_info, device, dtype, requires_grad, **kwargs):
    """Yield SampleInputs for grid_sample over 2-D/3-D modes, padding modes and align_corners."""
    # We get better tests if we change the range of the values to something like [-2,2]
    # because for grid (second tensor argument) the "useful" range is [-1,1] and this way
    # you get a better combination of out-of-range and in-range test cases
    # you get a better combination of out-of-range and in-range test cases
    _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad,
                           low=-2, high=2)

    batch_size = 2
    num_channels = 3
    modes = ("bilinear", "nearest")
    align_cornerss = (False, True)
    padding_modes = ("zeros", "border", "reflection")

    for dim in (2, 3):

        # bicubic is only implemented for 2d sampling
        modes_ = (*modes, "bicubic") if dim == 2 else modes

        for mode, padding_mode, align_corners in itertools.product(modes_, padding_modes, align_cornerss):
            yield SampleInput(
                _make_tensor((batch_size, num_channels, *[S] * dim)),
                _make_tensor((batch_size, *[S] * dim, dim)),
                mode=mode,
                padding_mode=padding_mode,
                align_corners=align_corners,
            )

def reference_inputs_grid_sample(op_info, device, dtype, requires_grad, **kwargs):
    """Large reference inputs for grid_sample: a rotated+scaled affine grid over a big image."""

    batch_size = 2
    num_channels = 3
    height = 345
    width = 456
    modes = ("bilinear", "nearest", "bicubic")
    align_cornerss = (False, True)
    padding_modes = ('zeros', 'border', 'reflection')

    # Create an affine transformation matrix
    a = torch.deg2rad(torch.tensor(45.0))
    ca, sa = torch.cos(a), torch.sin(a)  # rotation angles
    s1, s2 = 1.23, 1.34  # scales

    theta = torch.tensor([[
        [ca / s1, sa, 0.0],
        [-sa, ca / s2, 0.0],
    ]], dtype=dtype, device=device)
    theta = theta.expand(batch_size, 2, 3).contiguous()

    # deterministic ramp image (arange), wrapped through uint8 before casting to dtype
    x = torch.arange(batch_size * num_channels * height * width, device=device)
    x = x.reshape(batch_size, num_channels, height, width).to(torch.uint8)
    x = x.to(dtype=dtype)
    x.requires_grad_(requires_grad)

    # NOTE(review): `grid` depends only on align_corners, yet is recomputed for every
    # (mode, padding_mode) combination — hoisting it would be a free win.
    for mode, padding_mode, align_corners in itertools.product(modes, padding_modes, align_cornerss):
        grid = torch.nn.functional.affine_grid(
            theta, size=(batch_size, num_channels, height, width), align_corners=align_corners
        )
        yield SampleInput(
            x,
            grid,
            mode,
            padding_mode,
            align_corners,
        )

def sample_inputs_grid_sampler_2d(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for the low-level grid_sampler_2d overload (integer mode/padding enums)."""
    # We get better tests if we change the range of
    # the values to something like [-2,2]
    # because for grid (second tensor argument) the "useful" range is [-1,1] and this way
    # you get a better combination of out-of-range and in-range test cases
    _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad,
                           low=-2, high=2)

    batch_size = 2
    num_channels = 3
    modes = (0, 1, 2)  # integer enums, unlike the string modes of the public grid_sample
    align_cornerss = (False, True)
    padding_modes = (0, 1, 2)

    for mode, padding_mode, align_corners in itertools.product(modes, padding_modes, align_cornerss):
        yield SampleInput(
            _make_tensor((batch_size, num_channels, S, L)),
            _make_tensor((batch_size, M + 3, M, 2)),
            mode,
            padding_mode,
            align_corners,
        )

def sample_inputs_cosine_embedding_loss(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for cosine_embedding_loss over 1d/2d shapes and all reductions."""
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    def make_target(shape):
        shape = () if len(shape) == 1 else (shape[0], )
        t = torch.randint(0, 2, shape, device=device, dtype=torch.long)
        # Label with -1 or 1
        t = t * 2 - 1
        target = t.to(dtype=dtype).detach_().requires_grad_(requires_grad)
        return target

    shapes = ((S, S), (S,))
    reductions = ('none', 'mean', 'sum')
    for s, r in product(shapes, reductions):
        yield SampleInput(
            make_input(s),
            args=(make_input(s), make_target(s)),
            kwargs=dict(reduction=r, margin=random.uniform(-1, 1))
        )

def sample_inputs_ctc_loss(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for ctc_loss: list vs tensor lengths, all reductions, zero_infinity on/off."""
    input_length = 50
    batch = 16
    num_char = 20
    target_length = 30

    def make_log_probs(s):
        t = make_tensor(s, device=device, dtype=dtype)
        log_probs = t.log_softmax(2).to(device=device, dtype=dtype).detach().requires_grad_(requires_grad=requires_grad)
        return log_probs

    reductions = ('none', 'mean', 'sum')
    zero_inf = (True, False)
    lengths_type = (list, torch.Tensor)
    for r, z, lt in product(reductions, zero_inf, lengths_type):
        log_probs = make_log_probs((input_length, batch, num_char))
        # targets start at 1 because index 0 is the CTC blank — presumably; TODO confirm
        targets = torch.randint(1, num_char, (batch, target_length), dtype=torch.long, device=device)
        input_lengths = torch.full((batch, ), input_length, dtype=torch.long, device=device)
        target_lengths = torch.randint(10, target_length, (batch, ), dtype=torch.long, device=device)

        # Dont generate int[] types if reduction = "Mean" since this results in non composite compliant calls
        # to ctc_loss.IntList since a tensor needs to be created from the target lengths.
        # Creating such a tensor requires the use of pointers to copy data from int[] -> torch.Tensor
        # e.g. via std::copy. Similarly symbolic/real tracing with fx will also not work
        if lt is list and r in ["none", "sum"]:
            input_lengths = input_lengths.tolist()
            target_lengths = target_lengths.tolist()

        yield SampleInput(log_probs, args=(targets, input_lengths, target_lengths,), kwargs=dict(reduction=r, zero_infinity=z))

def sample_inputs_nll_loss(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for nll_loss: weights, ignore_index, all reductions, batched/2d variants."""
    shape = (2, 3)
    num_classes = shape[1]
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    # FIXME: Derivative wrt.
    # weight not implemented
    make_weight = partial(make_tensor, num_classes, device=device, dtype=dtype, requires_grad=False)

    def make_target(shape, zeros=False):
        # target drops the class dimension: (N, C, ...) -> (N, ...)
        s = (shape[0], *shape[2:]) if len(shape) > 1 else ()
        if zeros:
            return torch.zeros(s, device=device, dtype=torch.long)
        else:
            return make_tensor(s,
                               low=0,
                               high=shape[1] if len(shape) > 1 else shape[0],
                               device=device,
                               dtype=torch.long)


    def gen_shape_kwargs():
        # Batched, non-batched and 2d
        shapes = (shape, (num_classes,), shape + (2, 2))
        reductions = ('none', 'mean', 'sum')
        for reduction, s in product(reductions, shapes):
            yield make_input(s), make_target(s), dict(reduction=reduction)
            yield make_input(s), make_target(s), dict(weight=make_weight(), reduction=reduction)
            yield make_input(s), make_target(s), dict(weight=make_weight(low=0), reduction=reduction)
            yield make_input(s), make_target(s), dict(weight=make_weight(high=0), reduction=reduction)
            t = make_target(s)
            ignore = num_classes // 2
            # If "mean", nll returns NaN, so it's not differentiable at those points
            if t.eq(ignore).all() and reduction == "mean":
                t.fill_(0)
            yield make_input(s), t, dict(ignore_index=num_classes // 2, reduction=reduction)
            yield make_input(s), t, dict(ignore_index=num_classes // 2, reduction=reduction, weight=make_weight())
            # Test ignoring all the targets
            # If "mean", nll returns NaN, so it's not differentiable at those points
            if reduction != "mean":
                yield make_input(s), make_target(s, zeros=True), dict(ignore_index=0, reduction=reduction)

    for input, target, kwargs in gen_shape_kwargs():
        yield SampleInput(input, args=(target,), kwargs=kwargs)

    # negative ignore_index
    target = torch.tensor([-1, 2], device=device, dtype=torch.long)
    yield SampleInput(make_input(shape), args=(target,), kwargs={'ignore_index': -1})


def sample_inputs_binary_cross_entropy_with_logits(
    op_info, device, dtype, requires_grad, **kwargs
):
    """Sample inputs for binary_cross_entropy_with_logits: weight/pos_weight/reduction matrix."""
    make = partial(make_tensor, device=device, dtype=dtype)
    make_prob = partial(make, low=0, high=1)
    reductions = ("mean", "sum", "none")

    def make_weight_shape_kwargs():
        kwargs = []
        # NOTE(review): `(S)` is not a tuple — it is just the int S, so this requests a
        # 1-d weight of length S, same as `(S,)` would. Probably meant `(S,)` or another shape.
        for shape in ((1,), (1, S), (S), (S, S)):
            kwargs.extend([((S, S), dict(reduction=reduction, weight=make(shape))) for reduction in reductions])
        return kwargs

    shapes_and_kwargs = [
        *[(shape, None) for shape in ((), (1,), (S,), (S, S), (S, S, S))],
        *[((S, S), dict(reduction=reduction)) for reduction in reductions],
        *make_weight_shape_kwargs(),
        *[((S, S), dict(reduction=reduction, pos_weight=make((S,), low=0))) for reduction in reductions],
        *[((S, S), dict(reduction=reduction, weight=make((S, S)), pos_weight=make((S,), low=0))) for reduction in reductions],
    ]

    for shape, kwargs in shapes_and_kwargs:
        yield SampleInput(
            make(shape, requires_grad=requires_grad),
            args=(make_prob(shape, requires_grad=requires_grad),),
            kwargs=kwargs,
        )

def sample_inputs_argwhere(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for argwhere, including masked zeros, noncontiguous, empty, and scalar cases."""
    yield SampleInput(torch.tensor([1, 0, 2, 0], dtype=dtype, device=device, requires_grad=requires_grad))
    mask = torch.tensor([[0, 1, 0, 1, 0],
                         [1, 1, 1, 1, 0],
                         [0, 0, 0, 1, 0],
                         [1, 0, 1, 1, 0],
                         [1, 0, 0, 1, 0]], dtype=torch.bool, device=device)
    t = make_tensor((S, S), dtype=dtype, device=device, requires_grad=requires_grad)
    t[mask] = 0
    yield SampleInput(t)

    t = make_tensor((S, S), dtype=dtype, device=device, requires_grad=requires_grad, noncontiguous=True)
    t[mask] = 0
    yield SampleInput(t)

    t = make_tensor((S, 0), dtype=dtype, device=device, requires_grad=requires_grad)
    yield SampleInput(t)

    yield SampleInput(torch.zeros((S,), dtype=dtype, device=device, requires_grad=requires_grad))
    yield SampleInput(make_tensor((), dtype=dtype, device=device, requires_grad=requires_grad))

def _generate_sample_shape_reduction():
    """Yield every (shape, reduction) pair shared by the simple nn loss generators below."""
    shapes = ((S,), (S, S), (S, S, S))
    reductions = ('none', 'mean', 'sum')
    yield from product(shapes, reductions)

def sample_inputs_gaussian_nll_loss(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for gaussian_nll_loss, crossing broadcastable target/var shapes with full/eps."""
    _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    # Set low slightly above 0 so gradcheck doesn't accidentally dip below 0
    make_var = partial(make_tensor, low=0.1, device=device, dtype=dtype, requires_grad=requires_grad)

    def gen_shape(shape):
        yield shape
        # Broadcast
        yield (*shape[:-1], 1)
        yield shape[:-1]

    def gen_shape_kwargs():
        for s, r in _generate_sample_shape_reduction():
            for t_s, v_s in product(gen_shape(s), gen_shape(s)):
                yield _make_tensor(s), _make_tensor(t_s), make_var(v_s), dict(reduction=r)
                yield (
                    _make_tensor(s), _make_tensor(t_s), make_var(v_s),
                    dict(full=True, reduction=r)
                )
                yield (
                    _make_tensor(s), _make_tensor(t_s), make_var(v_s),
                    dict(eps=random.uniform(1e-6, 1e-3), reduction=r)
                )
                yield (
                    _make_tensor(s), _make_tensor(t_s), make_var(v_s),
                    dict(full=True, eps=random.uniform(1e-6, 1e-3), reduction=r)
                )

    for input, target, var, kwargs in gen_shape_kwargs():
        yield SampleInput(input, args=(target, var, ), kwargs=kwargs)

def error_inputs_gaussian_nll_loss(op_info, device, **kwargs):
    """Error inputs for gaussian_nll_loss: bad reduction, bad var shape, bad target shape."""
    _make = partial(make_tensor, device=device, dtype=torch.float32)

    # invalid reduction value
    yield ErrorInput(SampleInput(_make(10, 2, 3), _make(10, 2, 3), _make((10, 2, 3), low=0), reduction="abc"),
                     error_type=ValueError, error_regex="abc is not valid")

    # var is of incorrect shape
    yield ErrorInput(SampleInput(_make(10, 2, 3), _make(10, 2, 3), _make((10, 2, 2), low=0)),
                     error_type=ValueError, error_regex="var is of incorrect size")

    # target is of incorrect shape
    yield ErrorInput(SampleInput(_make(10, 2, 3), _make(10, 2, 2), _make((10, 2, 3), low=0)),
                     error_type=RuntimeError,
                     error_regex=(r"The size of tensor a \(3\) must match the size of tensor b \(2\) "
                                  r"at non-singleton dimension 2"))

def _generate_sample_inputs_nn_loss(op_info, device, dtype, requires_grad, **kwargs):
    """Yield (input, target, kwargs) triples for losses that take same-shape input/target."""
    _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    for s, r in _generate_sample_shape_reduction():
        yield _make_tensor(s), _make_tensor(s), dict(reduction=r)

def sample_inputs_hinge_embedding_loss(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for hinge_embedding_loss; targets are forced to the documented {-1, 1}."""
    for input, target, d in _generate_sample_inputs_nn_loss(op_info, device, dtype, requires_grad, **kwargs):
        # target should contain either 1 or -1 as per docs
        mask = torch.rand_like(target) > 0.5
        target[mask] = 1
        target[~mask] = -1
        d['margin'] = random.uniform(-9, 9)
        yield SampleInput(input, args=(target, ), kwargs=d)

    # scalar input and target.
    _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    yield SampleInput(_make_tensor(()), args=(_make_tensor(()), ))

def error_inputs_hinge_embedding_loss(op, device, **kwargs):
    """Error inputs for hinge_embedding_loss (invalid reduction string)."""
    make_input = partial(make_tensor, device=device, dtype=torch.float32)
    # invalid reduction value
    yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5, 4),), kwargs={'reduction': 'abc'}),
                     error_type=ValueError, error_regex='is not a valid value')

def reference_inputs_hinge_embedding_loss(op, device, dtype, requires_grad, **kwargs):
    """Reference inputs: the regular samples plus NaN/Inf propagation and broadcasting cases."""
    yield from sample_inputs_hinge_embedding_loss(op, device, dtype, requires_grad, **kwargs)
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    for reduction in ('sum', 'mean', 'none'):
        if dtype.is_floating_point:  # only supports ints and floats
            # NaN propagation
            inp = make_input((10, ))
            inp[2] = float('nan')
            target = make_input((10, ))
            # target should contain either 1 or -1 as per docs
            mask = torch.rand_like(target) > 0.5
            target[mask] = -1
            target[~mask] = 1
            yield SampleInput(inp, args=(target,), kwargs={'reduction': reduction})

            # Inf Handling
            inp = make_input((10, ))
            inp[4] = float('inf')
            target = make_input((10, ))
            mask = torch.rand_like(target) > 0.5
            target[mask] = -1
            target[~mask] = 1
            yield SampleInput(inp, args=(target,), kwargs={'reduction': reduction})

        # Broadcasting
        # NOTE(review): original indentation lost in transit — this section is assumed to sit at
        # loop level (broadcasting applies to ints too), not inside the float-only branch; confirm.
        inp = make_input((5, 5))
        target = make_input((1, 5))
        mask = torch.rand_like(target) > 0.5
        target[mask] = -1
        target[~mask] = 1
        yield SampleInput(inp, args=(target,), kwargs={'reduction': reduction})

def sample_inputs_huber_loss(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for huber_loss with a random positive delta per sample."""
    for input, target, d in _generate_sample_inputs_nn_loss(op_info, device, dtype, requires_grad, **kwargs):
        d['delta'] = random.uniform(1e-3, 9)
        yield SampleInput(input, args=(target, ), kwargs=d)

def error_inputs_huber_loss(op, device, **kwargs):
    """Error inputs for huber_loss: invalid reduction and non-positive delta."""
    make_input = partial(make_tensor, device=device, dtype=torch.float32)
    # invalid reduction value
    err = 'is not a valid value for reduction'
    yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5, 4),), kwargs={'reduction': 'abc'}),
                     error_type=ValueError, error_regex=err)
    # delta <= 0
    for delta in (0, -1):
        err = 'huber_loss does not support non-positive values for delta.'
        yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5, 4),), kwargs={'delta': delta}),
                         error_type=RuntimeError, error_regex=err)

def sample_inputs_poisson_nll_loss(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for poisson_nll_loss across log_input/full/eps/reduction combinations."""
    _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    def gen_shape_kwargs():
        for s, r in _generate_sample_shape_reduction():
            for li in (True, False):
                for f in (True, False):
                    i1 = _make_tensor(s)
                    i2 = _make_tensor(s)
                    # For Poisson NLL Loss,
                    # target is assumed to be from
                    # Poisson Distribution which
                    # always has positive samples
                    t1 = _make_tensor(s, low=0)
                    t2 = _make_tensor(s, low=0)

                    if not li:
                        i1.abs_()
                        i2.abs_()
                        t1.abs_()
                        t2.abs_()

                    yield (
                        i1, t1,
                        dict(log_input=li, full=f, reduction=r)
                    )
                    yield (
                        i2, t2,
                        dict(log_input=li, full=f,
                             eps=random.uniform(1e-8, 1e-3),
                             reduction=r)
                    )

    for input, target, kwargs in gen_shape_kwargs():
        yield SampleInput(input, args=(target, ), kwargs=kwargs)

    # test INT_TO_FLOAT promotion
    if dtype.is_complex:
        for d in (torch.bool, torch.int64):
            yield SampleInput(_make_tensor(dtype=dtype), args=(_make_tensor(dtype=d),))
            yield SampleInput(_make_tensor(dtype=d), args=(_make_tensor(dtype=dtype),))

def error_inputs_poisson_nll_loss(op_info, device, **kwargs):
    """Error inputs for poisson_nll_loss: invalid reduction and non-broadcastable shapes."""
    make = partial(make_tensor, device=device, dtype=torch.float32)

    # invalid reduction value
    yield ErrorInput(SampleInput(make(5, 4), args=(make(5, 4),),
                                 kwargs={'reduction': 'abc'}),
                     error_type=ValueError,
                     error_regex='abc is not a valid value for reduction')
    # invalid input shapes
    yield ErrorInput(SampleInput(make(5, 4), args=(make(5,),)),
                     error_regex=(r'(Attempting to broadcast a dimension of length|'
                                  r'The size of tensor a \(5\) must match the '
                                  r'size of tensor b \(4\) at non-singleton '
                                  r'dimension 1)'))

def error_inputs_soft_margin_loss(op_info, device, **kwargs):
    """Error inputs for soft_margin_loss: invalid reduction and non-broadcastable shapes."""
    make = partial(make_tensor, device=device, dtype=torch.float32)

    # invalid reduction value
    yield ErrorInput(SampleInput(make(5, 4), args=(make(5, 4),),
                                 kwargs={'reduction': 'abc'}),
                     error_type=ValueError,
                     error_regex='abc is not a valid value for reduction')
    # invalid input shapes
    yield ErrorInput(SampleInput(make(5, 4), args=(make(5,),)),
                     error_regex=(r'(Attempting to broadcast a dimension of length|'
                                  r'The size of tensor a \(4\) must match the '
                                  r'size of tensor b \(5\) at non-singleton '
                                  r'dimension 1)'))

def sample_inputs_triplet_margin_loss(op_info, device, dtype, requires_grad, with_distance=False, **kwargs):
    """Sample inputs for triplet_margin_loss; with_distance adds a custom distance_function."""
    make = partial(make_tensor, (S, M), device=device, dtype=dtype, requires_grad=requires_grad)

    kwargss = (
        *[dict(margin=margin) for margin in (1e-6, 1.0, 10.0)],
        dict(swap=True),
        *[dict(reduction=reduction) for reduction in ("mean", "sum", "none")],
    )

    for kwargs in kwargss:
        input = make()
        args = (make(), make())
        if with_distance:
            kwargs["distance_function"] = torch.nn.PairwiseDistance()
        yield SampleInput(input, args=args, kwargs=kwargs)

def error_inputs_triplet_margin_loss(op_info, device, **kwargs):
    """Error inputs for triplet_margin_loss: bad reduction/margin, shape and rank mismatches."""
    make_input = partial(make_tensor, device=device, dtype=torch.float32)

    samples = (
        # input, args, kwargs, error_type, error_regex
        # invalid reduction
        (make_input(3, 4), (make_input(3, 4), make_input(3, 4)),
         dict(reduction="abc"),
         ValueError, "abc is not a valid value for reduction"),

        # invalid margin
        (make_input(3, 4), (make_input(3, 4), make_input(3, 4)),
         dict(margin=-1.0),
         ValueError, "margin must be greater than 0, got -1.0"),

        # shape mismatch
        (make_input(3, 5), (make_input(3, 4), make_input(3, 4)),
         {},
         RuntimeError,
         (r'(Attempting to broadcast a dimension of length|'
          r"The size of tensor a \(5\) must match the size of tensor b \(4\) "
          r"at non-singleton dimension 1)")),
        (make_input(3, 4), (make_input(3, 5), make_input(3, 4)),
         {},
         RuntimeError,
         (r'(Attempting to broadcast a dimension of length|'
          r"The size of tensor a \(4\) must match the size of tensor b \(5\) "
          r"at non-singleton dimension 1)")),
        (make_input(3, 4), (make_input(3, 4), make_input(3, 5)),
         {},
         RuntimeError,
         (r'(Attempting to broadcast a dimension of length|'
          r"The size of tensor a \(4\) must match the size of tensor b \(5\) "
          r"at non-singleton dimension 1)")),

        # different dimensions
        (make_input(3,), (make_input(3, 4), make_input(3, 4)),
         {},
         RuntimeError,
         (r"The anchor, positive, and negative tensors are expected to have "
          r"the same number of dimensions, but got: anchor 1D, positive 2D, "
          r"and negative 2D inputs")),
        (make_input(3, 4), (make_input(3,), make_input(3, 4)),
         {},
         RuntimeError,
         (r"The anchor, positive, and negative tensors are expected to have "
          r"the same number of dimensions, but got: anchor 2D, positive 1D, "
          r"and negative 2D inputs")),
        (make_input(3, 4), (make_input(3, 4), make_input(3,)),
         {},
         RuntimeError,
         (r"The anchor, positive, and negative tensors are expected to have "
          r"the same number of dimensions, but got: anchor 2D, positive 2D, "
          r"and negative 1D inputs")),
    )

    for input, args, kwargs, error_type, error_regex in samples:
        yield ErrorInput(SampleInput(input, args=args, kwargs=kwargs),
                         error_type=error_type, error_regex=error_regex)

def sample_inputs_scaled_mm(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for _scaled_mm over the e4m3/e5m2 float8 operand combinations."""
    make_mat_e4m3 = partial(make_tensor, device=device, dtype=torch.float8_e4m3fn, requires_grad=requires_grad)
    make_mat_e5m2 = partial(make_tensor, device=device, dtype=torch.float8_e5m2, requires_grad=requires_grad)
    make_scale = partial(make_tensor, device=device, dtype=torch.float, requires_grad=False)
    M, N, K = 15, 32, 16
    samples = []
    # two e4m3
    # .t().contiguous().t() makes mat2 column-major, as the kernel requires — presumably; confirm
    mat1 = make_mat_e4m3((M, K))
    mat2 = make_mat_e4m3((K, N)).t().contiguous().t()
    scale1 = make_scale((1,))
    scale2 = make_scale((1,))
    samples.append(SampleInput(mat1, mat2, scale1, scale2))
    # mat1 e4m3 mat2 e5m2
    mat1 = make_mat_e4m3((M, K))
    mat2 = make_mat_e5m2((K, N)).t().contiguous().t()
    scale1 = make_scale((1,))
    scale2 = make_scale((1,))
    samples.append(SampleInput(mat1, mat2, scale1, scale2))
    # mat1 e5m2 mat2 e4m3
    mat1 = make_mat_e5m2((M, K))
    mat2 = make_mat_e4m3((K, N)).t().contiguous().t()
    scale1 = make_scale((1,))
    scale2 = make_scale((1,))
    samples.append(SampleInput(mat1, mat2, scale1, scale2))

    yield from samples

def sample_inputs_scaled_dot_product_attention(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for scaled_dot_product_attention: 3d/4d/broadcast shapes, causal, dropout, GQA."""
    make = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    batch, seq_q, seq_kv, num_heads, head_dim = 4, 3, 6, 4, 8
    num_heads_q_gqa, num_heads_kv_gqa = 32, 8

    dim_3_q_shape = (batch, seq_q, head_dim)
    dim_3_kv_shape = (batch, seq_kv, head_dim)
    dim_4_q_shape = (batch, num_heads, seq_q, head_dim)
    dim_4_kv_shape = (batch, num_heads, seq_kv, head_dim)

    broadcast_tuple = ((num_heads, seq_q, head_dim), (batch, num_heads, seq_kv, head_dim))

    qkv_shapes = [(dim_3_q_shape, dim_3_kv_shape), (dim_4_q_shape, dim_4_kv_shape), broadcast_tuple]
    samples = []
    gqa_options = [False] if TEST_WITH_ROCM else [True, False]  # TODO: GQA support
    if TEST_WITH_ROCM and dtype == torch.float32:
        causal_options = [False]  # FIXME: Large errors with causal+fp32
    else:
        causal_options = [True, False]
    # NOTE(review): `enable_gqa` is drawn from the product but never used in the loop body,
    # so each (shape, causal, dropout) sample is emitted once per gqa option — duplicates.
    for qkv_shape, is_causal, dropout_p, enable_gqa in product(
            qkv_shapes, causal_options, [0.0, 0.5], gqa_options):
        shape_q, shape_kv = qkv_shape
        samples.append(SampleInput(
            make(shape_q),
            make(shape_kv),
            make(shape_kv),
            is_causal=is_causal,
            dropout_p=dropout_p
        ))

    # Add non standard shapes
    # NOTE(review): `diff_v_head_dim` is built but never appended to `samples` (dead code);
    # it also reads `is_causal`/`dropout_p` leaked from the loop above. Confirm intent upstream.
    diff_v_head_dim = SampleInput(
        make((batch, num_heads, seq_q, head_dim)),
        make((batch, num_heads, seq_kv, head_dim)),
        make((batch, num_heads, seq_kv, head_dim + 8)),
        is_causal=is_causal,
        dropout_p=dropout_p
    )

    # Add an attn_mask
    samples.append(
        SampleInput(
            make((batch, num_heads, seq_q, head_dim)),
            make((batch, num_heads, seq_kv, head_dim)),
            make((batch, num_heads, seq_kv, head_dim)),
            attn_mask=make((seq_q, seq_kv)),
            is_causal=False,
            dropout_p=0.0)
    )

    if not TEST_WITH_ROCM:
        samples.append(
            SampleInput(
                make((batch, num_heads_q_gqa, seq_q, head_dim)),
                make((batch, num_heads_kv_gqa, seq_kv, head_dim)),
                make((batch, num_heads_kv_gqa, seq_kv, head_dim)),
                enable_gqa=True
            )
        )

    yield from samples


def sample_inputs_efficient_attention_forward(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for _efficient_attention_forward, including bias and jagged (cu_seqlens) cases."""
    make = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    batch, num_heads, head_dim = 4, 4, 8
    seq_q = 11
    seq_kv = 32

    dim_4_q_shape = (batch, num_heads, seq_q, head_dim)
    dim_4_kv_shape = (batch, num_heads, seq_kv, head_dim)

    qkv_shapes = [(dim_4_q_shape, dim_4_kv_shape)]
    samples = []
    mask_types = [1, 2]  # UpperLeft, LowerRight
    scales = [None, 1.0]

    for qkv_shape, is_causal, dropout_p, mask_type, scale in product(
            qkv_shapes, [True, False], [0.0, 0.5], mask_types, scales):
        shape_q, shape_kv = qkv_shape
        samples.append(SampleInput(
            make(shape_q).transpose(1, 2),
            make(shape_kv).transpose(1, 2),
            make(shape_kv).transpose(1, 2),
            bias=None,
            cu_seqlens_q=None,
            cu_seqlens_k=None,
            max_seqlen_q=None,
            max_seqlen_k=None,
            dropout_p=dropout_p,
            custom_mask_type=mask_type,
            compute_log_sumexp=requires_grad,
            scale=scale,
            seqlen_k=None
        ))

    # Add non standard shapes
    # NOTE(review): `diff_v_head_dim` is never appended to `samples` (dead code) and uses
    # `dropout_p` leaked from the loop above — confirm intent upstream.
    diff_v_head_dim = SampleInput(
        make((batch, seq_q, num_heads, head_dim)),
        make((batch, seq_kv, num_heads, head_dim)),
        make((batch, seq_kv, num_heads, head_dim + 8)),
        bias=None,
        cu_seqlens_q=None,
        cu_seqlens_k=None,
        max_seqlen_q=None,
        max_seqlen_k=None,
        dropout_p=dropout_p,
        custom_mask_type=0,  # No Mask
        compute_log_sumexp=requires_grad,
        scale=None,
        seqlen_k=None
    )

    # Add an attn_mask
    samples.append(
        SampleInput(
            make((batch, seq_q, num_heads, head_dim)),
            make((batch, seq_kv, num_heads, head_dim)),
            make((batch, seq_kv, num_heads, head_dim)),
            bias=make(batch, num_heads, seq_q, seq_kv),
            cu_seqlens_q=None,
            cu_seqlens_k=None,
            max_seqlen_q=None,
            max_seqlen_k=None,
            dropout_p=dropout_p,
            custom_mask_type=0,  # No Mask
            compute_log_sumexp=requires_grad,
            scale=None,
            seqlen_k=None
        )
    )

    # jagged (with query/keys offsets)
    cu_seqlens_k = torch.arange(-1, 32 * 2 + 1, 2, dtype=torch.int32, device=device)
    cu_seqlens_k[-1] = 62
    cu_seqlens_k[0] = 0
    samples.append(
        SampleInput(
            make((32, 2, 64)).view(-1, 8, 8).unsqueeze(0),
            make((64, 64)).view(-1, 8, 8).unsqueeze(0),
            make((64, 64)).view(-1, 8, 8).unsqueeze(0),
            bias=None,
            cu_seqlens_q=torch.arange(0, 32 * 2 + 2, 2, dtype=torch.int32, device=device),
            cu_seqlens_k=cu_seqlens_k,
            max_seqlen_q=2,
            max_seqlen_k=2,
            dropout_p=0.0,
            custom_mask_type=0,  # No Mask
            compute_log_sumexp=requires_grad,
            scale=None,
            seqlen_k=None,
        )
    )

    yield from samples

def sample_inputs_flash_attention_forward(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for _flash_attention_forward over causal/dropout/scale combinations."""
    make = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    batch, num_heads, head_dim = 4, 4, 8
    seq_q = 11
    seq_kv = 32

    dim_4_q_shape = (batch, num_heads, seq_q, head_dim)
    dim_4_kv_shape = (batch, num_heads, seq_kv, head_dim)

    qkv_shapes = [(dim_4_q_shape, dim_4_kv_shape)]
    samples = []
    scales = [None, 1.0]

    for qkv_shape, is_causal, dropout_p, scale in product(
            qkv_shapes, [True, False], [0.0, 0.5], scales):
        shape_q, shape_kv = qkv_shape
        samples.append(SampleInput(
            make(shape_q).transpose(1, 2),
            make(shape_kv).transpose(1, 2),
            make(shape_kv).transpose(1, 2),
            cum_seq_q=None,
            cum_seq_k=None,
            max_q=seq_q,
            max_k=seq_kv,
            dropout_p=dropout_p,
            is_causal=is_causal,
            return_debug_mask=False,
            scale=scale,
        ))

    yield from samples

def sample_inputs_pairwise_distance(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for pairwise_distance: batched/unbatched, keepdim, assorted p and eps."""
    make = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    shape = (3,)
    batched_shape = (2, *shape)
    shapes_and_kwargs = [
        (shape, None),
        (batched_shape, None),
        (shape, dict(keepdim=True)),
        (batched_shape, dict(keepdim=True)),
        (shape, dict(p=5.0)),
        (shape, dict(p=-1.0)),
        (shape, dict(eps=1.0)),
    ]

    return (
        SampleInput(make(shape), args=(make(shape),), kwargs=kwargs) for shape, kwargs in shapes_and_kwargs
    )

def sample_inputs_pixel_shuffle(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for pixel_shuffle, including zero-sized dimensions."""
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    yield from (
        SampleInput(make_arg((1, 9, 2, 2)), upscale_factor=upscale_factor)
        for upscale_factor in (1, 3)
    )
    yield from (
        SampleInput(make_arg(shape), upscale_factor=1)
        for shape in [
            (1, 0, 1, 1),
            (1, 1, 0, 1),
            (1, 1, 1, 0),
        ]
    )

def sample_inputs_pixel_unshuffle(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for pixel_unshuffle, including zero-sized dimensions."""
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    yield from (
        SampleInput(make_arg((1, 1, 6, 6)), downscale_factor=downscale_factor)
        for downscale_factor in (1, 3)
    )
    yield from (
        SampleInput(make_arg(shape), downscale_factor=1)
        for shape in [
            (1, 0, 1, 1),
            (1, 1, 0, 1),
            (1, 1, 1, 0),
        ]
    )

def sample_inputs_channel_shuffle(op_info, device, dtype, requires_grad, **kwargs):
    """Sample inputs for channel_shuffle with channel counts divisible by groups."""
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    shapes_groups = [
        ((1, 4, 10, 10), 2),
        ((2, 6, 8, 8), 3),
        ((2, 8, 5, 5), 4),
    ]

    yield from (
        SampleInput(make_arg(shape), args=(groups,))
        for shape, groups in shapes_groups
    )

def sample_inputs_binary_cross_entropy(op_info, device, dtype, requires_grad, logits=False, **kwargs):
    """Sample inputs for binary_cross_entropy (and the with_logits variant when logits=True)."""
    make = partial(make_tensor, device=device, dtype=dtype)
    # Lower bounds must be greater than 'eps' defined in gradcheck.py::gradgradcheck() -> eps
    # otherwise perturbation
calculation causes Tensor value to become negative triggering + # a device-side hardware assertion + make_prob = partial(make, low=1e-6, high=1) + + reductions = ("mean", "sum", "none") + + shapes_and_kwargs = [ + *[(shape, None) for shape in ((), (1,), (S,), (S, S), (S, S, S))], + *[((S, S), dict(reduction=reduction)) for reduction in reductions], + *[((S, S), dict(reduction=reduction, weight=make((S, S)))) for reduction in reductions], + ] + + if logits: + shapes_and_kwargs.extend( + [((S, S), dict(reduction=reduction, pos_weight=make((S,), low=0))) for reduction in reductions] + ) + + for shape, kwargs in shapes_and_kwargs: + yield SampleInput( + (make if logits else make_prob)(shape, requires_grad=requires_grad), + args=(make_prob(shape, requires_grad=requires_grad),), + kwargs=kwargs, + ) + +def sample_inputs_allclose(op_info, device, dtype, requires_grad, **kwargs): + sample_shapes = [(), (S), (S, S, S)] + atols = [1e-2, 1e-16] + rtols = [1e-1, 0.5] + eps = 1e-8 + for s, rtol, atol in product(sample_shapes, rtols, atols): + # close sample + t = make_tensor(s, device=device, dtype=dtype, requires_grad=requires_grad) + close = (t + atol).detach().requires_grad_(requires_grad) + yield SampleInput(t, close, rtol=rtol, atol=atol) + + # random sample + a = make_tensor(s, device=device, dtype=dtype, requires_grad=requires_grad) + b = make_tensor(s, device=device, dtype=dtype, requires_grad=requires_grad) + yield SampleInput(a, b, rtol=rtol, atol=atol) + + +def sample_inputs_l1_loss(op_info, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_loss(op_info, device, dtype, requires_grad, **kwargs) + + # test COMPLEX_TO_FLOAT promotion + if dtype.is_complex: + make = partial(make_tensor, (), device=device, requires_grad=requires_grad) + yield SampleInput(make(dtype=dtype), args=(make(dtype=torch.double),)) + yield SampleInput(make(dtype=torch.double), args=(make(dtype=dtype),)) + +def error_inputs_l1_loss(op_info, device, **kwargs): + make = 
partial(make_tensor, device=device, dtype=torch.float32) + + # invalid reduction value + yield ErrorInput(SampleInput(make(5, 4), args=(make(5, 4),), + kwargs={'reduction': 'abc'}), + error_type=ValueError, + error_regex='abc is not a valid value for reduction') + # invalid input shapes + yield ErrorInput(SampleInput(make(5, 4), args=(make(5,),)), + error_regex=(r'(Attempting to broadcast a dimension of length|' + r'The size of tensor a \(4\) must match the ' + r'size of tensor b \(5\) at non-singleton ' + r'dimension 1)') + ) + +def sample_inputs_smooth_l1_loss(op_info, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_loss(op_info, device, dtype, requires_grad, **kwargs) + + make = partial(make_tensor, (S, S), device=device, dtype=dtype, requires_grad=requires_grad) + + # This test case always triggers the smooth condition, since absolute difference of input and target + # is smaller than beta + yield SampleInput(make(low=0, high=2), args=(make(low=-2, high=0),), kwargs=dict(beta=5)) + yield SampleInput(make(), args=(make(),), kwargs=dict(beta=0)) + +def sample_inputs_kl_div(op_info, device, dtype, requires_grad, **kwargs): + # kl_div works with inputs in [0, 1] (aka the pdf of a probability measure) + # Then log [0, 1] = (-inf, 0], so this is the log space + make_arg = partial(make_tensor, low=0., device=device, dtype=dtype, requires_grad=requires_grad) + + def make_log(shape): + out = torch.nn.functional.log_softmax(make_arg(shape), -1) + out.requires_grad_(requires_grad) + return out + + def make_prob(shape): + out = torch.nn.functional.softmax(make_arg(shape), -1) + out.requires_grad_(requires_grad) + return out + + shapes = ((2,), (2, 3)) + reductions = ("none", "mean", "batchmean", "sum") + for shape, reduction, log_target in product(shapes, reductions, (True, False)): + input = make_log(shape) + target = make_log(shape) if log_target else make_prob(shape) + yield SampleInput(input, args=(target,), kwargs=dict(reduction=reduction, 
                                                             log_target=log_target))

def sample_inputs_pdist(op_info, device, dtype, requires_grad, **kwargs):
    """Yield SampleInputs for pdist: several (n, m) sizes and p-norm values."""
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    yield from (SampleInput(make_input((n, m))) for n, m in itertools.product((1, S), repeat=2))
    yield from (SampleInput(make_input((S, S)), kwargs=dict(p=p)) for p in (0.0, 1.0, 2.0, 10.0, float("inf")))

def reference_pdist(input, p=2):
    """SciPy-based reference for pdist; p=0 and p=inf need special handling
    because scipy's minkowski metric does not cover them."""
    pdist = scipy.spatial.distance.pdist
    if p == 0:
        # scipy's "hamming" returns a fraction of mismatches; scale by the
        # number of columns to count them, matching the p=0 "norm"
        output = pdist(input, "hamming") * input.shape[1]
    elif p == float("inf"):
        output = pdist(input, lambda x, y: np.abs(x - y).max())
    else:
        output = pdist(input, "minkowski", p=p)
    return output.astype(input.dtype)

def sample_inputs_diagflat(op_info, device, dtype, requires_grad, **kwargs):
    """Yield SampleInputs for diagflat: 0-d/1-d/2-d inputs and nonzero offsets."""
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    yield SampleInput(make_input(()))
    yield SampleInput(make_input((2,)))
    yield SampleInput(make_input((2, 2)))
    yield SampleInput(make_input((2,)), offset=1)
    yield SampleInput(make_input((2,)), offset=-1)

def sample_inputs_max_unpool(op_info, device, dtype, requires_grad, **kwargs):
    """Yield SampleInputs for max_unpool{1,2,3}d.

    Samples are derived by running the matching max_pool over the max_pool
    sampler's inputs and feeding the pooled output + indices to unpool.
    """
    unpool_name_to_pool_method_dict = {
        'nn.functional.max_unpool1d': torch.nn.functional.max_pool1d,
        'nn.functional.max_unpool2d': torch.nn.functional.max_pool2d,
        'nn.functional.max_unpool3d': torch.nn.functional.max_pool3d
    }

    unpool_name_to_dim = {
        'nn.functional.max_unpool1d': 1,
        'nn.functional.max_unpool2d': 2,
        'nn.functional.max_unpool3d': 3
    }

    unpool_to_pool_name_dict = {k: f'nn.functional.{v.__name__}' for k, v in unpool_name_to_pool_method_dict.items()}

    pool_dim = unpool_name_to_dim[op_info.name]
    pool_method = unpool_name_to_pool_method_dict[op_info.name]

    # Reuse the max_pool sampler under the corresponding pool op's name.
    pool_op_info = copy.copy(op_info)
    pool_op_info.name = unpool_to_pool_name_dict[op_info.name]

    for sample in sample_inputs_max_pool(pool_op_info, device, dtype, requires_grad, **kwargs):
        # shapes (C, ...) do not work as of now,
        # see https://github.com/pytorch/pytorch/issues/68337
        # TODO: remove once the issue is resolved
        if sample.input.dim() != pool_dim + 2:
            continue

        # No dilation > 1 for max_unpool,
        # see https://github.com/pytorch/pytorch/issues/68420
        if sample.kwargs['dilation'] != 1:
            continue

        # Can't unpool without indices
        if sample.kwargs['return_indices']:
            pool, indices = pool_method(sample.input, **sample.kwargs)
            # arg has to be a leaf
            arg = pool.detach().requires_grad_(requires_grad)
            sample_kwargs = {
                'kernel_size': sample.kwargs['kernel_size'],
                'stride': sample.kwargs['stride'],
                'padding': sample.kwargs['padding'],
                # output_size could be None but we specify it explicitly
                # to compensate for the information loss in pool due
                # to the floor/ceil operation used to compute the shapes
                'output_size': sample.input.size()
            }

            yield SampleInput(arg, args=(indices,), kwargs=sample_kwargs)

def sample_inputs_max_unpool_grad(op_info, device, dtype, requires_grad, **kwargs):
    """Yield max_unpool samples safe for gradcheck (unique indices only)."""
    for sample in sample_inputs_max_unpool(op_info, device, dtype, requires_grad, **kwargs):
        indices = sample.args[0]
        # The samples for max_unpool are generated with max_pool.
        # It could be that a single element from the max_pool's
        # input is mapped to several locations in its output.
        # This situation leads to failed gradchecks because
        # the finite difference algorithm perturbs the elements
        # of the output one by one, and not in classes of
        # equivalences determined by whether two elements
        # in the output are coming from the same location in the
        # input (simply put, they have the same corresponding index).
        # So, there are two ways to resolve this issue:
        # 1. Extract a perturbation for one element and apply it all
        #    the elements from the same equivalence class, or
        # 2. Make sure that the equivalence classes are all singletons,
        #    i.e. the index tensor has to be comprised of only unique
        #    indices.
        # Here we go with the solution 2, the easiest of all.
        if indices.unique().numel() == indices.numel():
            yield sample

def sample_inputs_multi_head_attention_forward(opinfo, device, dtype, requires_grad, **kwargs):
    """Yield SampleInputs for F.multi_head_attention_forward.

    When requires_grad is set the parameter grid is trimmed aggressively,
    because backward tests over the full grid exceed the CI time budget.
    """
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    if requires_grad:
        # backward tests would take too long to complete, causing the job timeout.
        bsz = 2
        is_batcheds = (True,)
        use_separate_proj_weights = (False,)
        emb_sizes = (2,)
        src_lens = (XS,)
        tgt_lens = (XS,)
        heads = (2,)
        dropouts = (0.5,)
        mask_types = ("2d",)
    else:
        bsz = 2
        is_batcheds = (False, True)
        use_separate_proj_weights = (False, True)
        emb_sizes = (2, 4)
        src_lens = (XS,)
        tgt_lens = (XS, S)
        heads = (1, 2)
        dropouts = (0.0, 0.5)
        mask_types = (None, "2d", "3d")

    for is_batched, use_separate_proj_weight, mask_type, emb_size, src_len, tgt_len, num_heads, dropout_p in itertools.product(
        is_batcheds, use_separate_proj_weights, mask_types, emb_sizes, src_lens, tgt_lens, heads, dropouts
    ):
        attn_mask = None
        if mask_type == "2d":
            # NOTE(review): assumes (query_len, key_len) mask ordering; the
            # query q below has length src_len — confirm against the op's docs
            attn_mask = make_input(src_len, tgt_len)
        elif mask_type == "3d":
            # 3d masks are per-head: (batch * num_heads, query_len, key_len)
            attn_mask = make_input((bsz if is_batched else 1) * num_heads, src_len, tgt_len)

        if is_batched:
            # unbatched layout is (len, emb); batched adds the bsz dim in the middle
            q = make_input(src_len, bsz, emb_size)
            k = make_input(tgt_len, bsz, emb_size)
            v = make_input(tgt_len, bsz, emb_size)
        else:
            q = make_input(src_len, emb_size)
            k = make_input(tgt_len, emb_size)
            v = make_input(tgt_len, emb_size)
        if use_separate_proj_weight:
            # separate q/k/v projections instead of one packed in_proj_weight
            in_proj_weight = None
            q_proj_weight = make_input(emb_size, emb_size)
            k_proj_weight = make_input(emb_size, emb_size)
            v_proj_weight = make_input(emb_size, emb_size)
        else:
            in_proj_weight = make_input(emb_size * 3, emb_size)
            q_proj_weight = None
            k_proj_weight = None
            v_proj_weight = None

        bias_k = make_input(emb_size)
        bias_v = make_input(emb_size)
        in_proj_bias = make_input(emb_size * 3)
        out_proj_weight = make_input(emb_size, emb_size)
        out_proj_bias = make_input(emb_size)
        sample_args = (
            k, v, emb_size, num_heads, in_proj_weight,
            in_proj_bias, bias_k, bias_v, False,
            dropout_p, out_proj_weight, out_proj_bias
        )
        sample_kwargs = {
            "q_proj_weight" : q_proj_weight,
            "k_proj_weight" : k_proj_weight,
            "v_proj_weight" : v_proj_weight,
            "attn_mask" : attn_mask,
            # dropout is a no-op in eval mode, so only train when it is active
            "training" : True if dropout_p > 0.0 else False,
            "use_separate_proj_weight" : use_separate_proj_weight
        }

        yield SampleInput(q, args=sample_args, kwargs=sample_kwargs)


# Includes some values such that N * N won't be a multiple of 4,
# which should ensure we test the vectorized and non-vectorized
# kernel code paths.
NUM_SIZE0_TENSORS = 10000
foreach_num_tensors = [20, 23] if not TEST_WITH_SLOW else [23, 30, 300]
# Defaults forwarded to make_tensor when constructing foreach input tensorlists.
_foreach_inputs_default_kwargs = {"noncontiguous": False, "same_size": False, "low": None, "high": None}


class ForeachRightmostArgType(enum.Enum):
    # Kinds the rightmost argument of a foreach op may take.
    TensorList = enum.auto()
    ScalarList = enum.auto()
    Scalar = enum.auto()
    Tensor = enum.auto()


class ForeachSampleInput(SampleInput):
    # For TensorList Scalar/Tensor, we compute the reference
    # by converting it into TensorList ScalarList/TensorList and
    # then converting into multiple Tensor Scalar/Tensor.
    # ref_args contains the args converted to TensorList ScalarList/TensorList
    ref_args: Any
    disable_fastpath: bool

    def __init__(self, *args, disable_fastpath=False, ref_args=None, **kwargs):
        super().__init__(*args, **kwargs)
        # Fall back to the actual args when no dedicated reference args are given.
        self.ref_args = ref_args or self.args
        self.disable_fastpath = disable_fastpath


class foreach_inputs_sample_func:
    """Sample-input generator for foreach ops of a given arity.

    The rightmost argument is always allowed to be a TensorList; depending on
    the constructor flags it may additionally be a Scalar, ScalarList, or Tensor.
    """

    def __init__(
        self,
        arity: int,
        rightmost_supports_scalar: bool,
        rightmost_supports_scalarlist: bool,
        rightmost_supports_tensor: bool = False,
    ) -> None:
        self.arity = arity
        self._set_rightmost_arg_types(
            rightmost_supports_scalar, rightmost_supports_scalarlist, rightmost_supports_tensor,
        )
        # whether to also emit samples with empty tensors interspersed (both variants)
        self._intersperse_empty = (True, False)

    def _set_rightmost_arg_types(
        self,
        rightmost_supports_scalar: bool,
        rightmost_supports_scalarlist: bool,
        rightmost_supports_tensor: bool,
    ) -> None:
        # TensorList is always supported; the other kinds only apply for arity > 1.
        self._rightmost_arg_types = [ForeachRightmostArgType.TensorList]
        if self.arity > 1:
            if rightmost_supports_scalar:
                self._rightmost_arg_types.append(ForeachRightmostArgType.Scalar)
            if rightmost_supports_scalarlist:
                self._rightmost_arg_types.append(ForeachRightmostArgType.ScalarList)
            if rightmost_supports_tensor:
                self._rightmost_arg_types.append(ForeachRightmostArgType.Tensor)

    def _sample_rightmost_arg(
        self,
        opinfo,
        rightmost_arg_type,
        device,
        dtype,
        num_tensors,
        allow_higher_dtype_scalars,
        **_foreach_inputs_kwargs,
    ):
        """Return a list of candidate rightmost arguments of the requested kind."""
        if rightmost_arg_type == ForeachRightmostArgType.TensorList:
            return [sample_inputs_foreach(None, device, dtype, num_tensors, **_foreach_inputs_kwargs)]
        if rightmost_arg_type == ForeachRightmostArgType.Tensor:
            return [make_tensor(
                (), device=device, dtype=dtype,
                noncontiguous=_foreach_inputs_kwargs["noncontiguous"],
                requires_grad=_foreach_inputs_kwargs.get("requires_grad", False),
            )]
        # low-precision pow is numerically delicate, so keep its scalars simple
        should_use_simpler_scalars = opinfo.name == "_foreach_pow" and dtype in (torch.float16, torch.bfloat16)

        def sample_float():
            s = random.random()
            if should_use_simpler_scalars:
                return 1.0 if s > 0.5 else 2.0
            else:
                return 1.0 - s

        high = 2 if should_use_simpler_scalars else 9
        if rightmost_arg_type == ForeachRightmostArgType.ScalarList:
            scalarlist_list = []
            # ints (always valid)
            scalarlist_list.append([random.randint(0, high) + 1 for _ in range(num_tensors)])

            if allow_higher_dtype_scalars or dtype.is_floating_point:
                scalarlist_list.append([sample_float() for _ in range(num_tensors)])
            if allow_higher_dtype_scalars or dtype.is_complex:
                scalarlist_list.append([complex(sample_float(), sample_float()) for _ in range(num_tensors)])
                # mixed-type scalar lists to exercise type-promotion handling
                scalarlist_list.append([1, 2.0, 3.0 + 4.5j] + [3.0 for _ in range(num_tensors - 3)])
                scalarlist_list.append([True, 1, 2.0, 3.0 + 4.5j] + [3.0 for _ in range(num_tensors - 4)])
            return scalarlist_list
        if rightmost_arg_type == ForeachRightmostArgType.Scalar:
            scalars = []
            scalars.append(random.randint(1, high + 1))
            if allow_higher_dtype_scalars or dtype.is_floating_point:
                scalars.append(sample_float())
            if allow_higher_dtype_scalars or dtype.is_complex:
                scalars.append(complex(sample_float(), sample_float()))
            scalars.append(True)
            return scalars
        raise AssertionError(f"Invalid rightmost_arg_type of {rightmost_arg_type}")

    def _should_disable_fastpath(self, opinfo, rightmost_arg, rightmost_arg_type, dtype):
        """Return whether the multi-tensor-apply fastpath must be disabled for
        this sample; None when not applicable (e.g. Tensor rightmost arg)."""
        if self.arity == 1:
            if "foreach_abs" in opinfo.name and dtype in complex_types():
                return True
            # unary
            if opinfo.ref in (torch.abs, torch.neg):
                return False
            if opinfo.ref_inplace in (torch.Tensor.zero_,):
                return False
            return dtype in integral_types_and(torch.bool)
        if self.arity < 2 or rightmost_arg_type == ForeachRightmostArgType.Tensor:
            return None
        if "foreach_pow" in opinfo.name and dtype in integral_types_and(torch.bool):
            return True
        if any(
            foreach_name in opinfo.name
            for foreach_name in ("foreach_clamp_max", "foreach_clamp_min", "foreach_maximum", "foreach_minimum")
        ) and dtype in integral_types_and(torch.bool):
            return True
        if rightmost_arg_type == ForeachRightmostArgType.TensorList:
            disable_fastpath = "foreach_div" in opinfo.name and dtype in integral_types_and(torch.bool)
            if "foreach_add" in opinfo.name and dtype == torch.bool:
                disable_fastpath = True
            return disable_fastpath
        elif rightmost_arg_type == ForeachRightmostArgType.Scalar:
            disable_fastpath = "foreach_div" in opinfo.name and dtype in integral_types_and(torch.bool)
            # whether the fastpath applies depends on the scalar's Python type
            if isinstance(rightmost_arg, bool):
                disable_fastpath |= dtype == torch.bool
                if opinfo.ref in (torch.add, torch.mul):
                    disable_fastpath = False
            elif isinstance(rightmost_arg, int):
                disable_fastpath |= dtype == torch.bool
            elif isinstance(rightmost_arg, float):
                disable_fastpath |= dtype in integral_types_and(torch.bool)
            elif isinstance(rightmost_arg, complex):
                disable_fastpath |= dtype not in complex_types()
            else:
                raise AssertionError(f"Invalid scalar of type {rightmost_arg_type} - {rightmost_arg}")
            return disable_fastpath
        elif rightmost_arg_type == ForeachRightmostArgType.ScalarList:
            disable_fastpath = opinfo.ref == torch.div and dtype in integral_types_and(torch.bool)
            elmt_t = type(rightmost_arg[0])
            has_same_type = all(isinstance(v, elmt_t) for v in rightmost_arg)
            if not has_same_type:
                # mixed-type scalar lists only take the fastpath for complex dtypes
                return dtype not in complex_types()
            if isinstance(rightmost_arg[0], bool):
                if ("foreach_add" in opinfo.name or "foreach_mul" in opinfo.name) and dtype == torch.bool:
                    disable_fastpath = False
            elif isinstance(rightmost_arg[0], int):
                disable_fastpath |= dtype == torch.bool
            elif isinstance(rightmost_arg[0], float):
                disable_fastpath |= dtype in integral_types_and(torch.bool)
            elif isinstance(rightmost_arg[0], complex):
                disable_fastpath |= dtype not in complex_types()
            else:
                raise AssertionError(f"Invalid scalarlist of {rightmost_arg}")
            return disable_fastpath
        else:
            raise AssertionError(f"Invalid rightmost_arg_type of {rightmost_arg_type}")

    def _sample_kwargs(self, opinfo, rightmost_arg, rightmost_arg_type, dtype):
        """Build per-sample kwargs: optional alpha plus the fastpath flag."""
        kwargs = {}
        if rightmost_arg_type == ForeachRightmostArgType.TensorList and opinfo.supports_alpha_param:
            # alpha must match the dtype category
            if dtype in integral_types_and(torch.bool):
                kwargs["alpha"] = 3
            elif dtype.is_complex:
                kwargs["alpha"] = complex(3, 3)
            else:
                kwargs["alpha"] = 3.14
        if self.arity > 1:
            kwargs["disable_fastpath"] = self._should_disable_fastpath(opinfo, rightmost_arg, rightmost_arg_type, dtype)
        return kwargs

    def sample_zero_size_tensor_inputs(self, opinfo, device, dtype, requires_grad, **kwargs):
        """Yield samples whose tensorlists contain only zero-size tensors."""
        assert "num_input_tensors" not in kwargs
        _foreach_inputs_kwargs = {k: kwargs.pop(k, v) for k, v in _foreach_inputs_default_kwargs.items()}
        _foreach_inputs_kwargs["requires_grad"] = requires_grad
        allow_higher_dtype_scalars = kwargs.pop("allow_higher_dtype_scalars", False)
        for rightmost_arg_type in self._rightmost_arg_types:
            zero_size_foreach_inputs_kwargs = copy.deepcopy(_foreach_inputs_kwargs)
            zero_size_foreach_inputs_kwargs["zero_size"] = True
            input = sample_inputs_foreach(None, device, dtype, NUM_SIZE0_TENSORS, **zero_size_foreach_inputs_kwargs)
            if self.arity > 1:
                args = [
                    sample_inputs_foreach(None, device, dtype, NUM_SIZE0_TENSORS, **zero_size_foreach_inputs_kwargs)
                    for _ in range(self.arity - 2)
                ]
                # the rightmost arg is always a TensorList for zero-size samples
                args.append(
                    self._sample_rightmost_arg(
                        opinfo,
                        ForeachRightmostArgType.TensorList,
                        device,
                        dtype,
                        NUM_SIZE0_TENSORS,
                        allow_higher_dtype_scalars=allow_higher_dtype_scalars,
                        **zero_size_foreach_inputs_kwargs,
                    )[0])
                kwargs = self._sample_kwargs(
                    opinfo, args[-1], ForeachRightmostArgType.TensorList, dtype)
            else:
                args = []
                kwargs = {}
                if opinfo.ref in (torch.abs, torch.neg):
                    kwargs["disable_fastpath"] = False
                else:
                    kwargs["disable_fastpath"] = dtype in integral_types_and(torch.bool)
            yield ForeachSampleInput(input, *args, **kwargs)

    def __call__(self, opinfo, device, dtype, requires_grad, **kwargs):
        """Yield ForeachSampleInputs over num_tensors x rightmost-arg kind
        x empty-tensor interspersion."""
        num_input_tensors_specified = "num_input_tensors" in kwargs
        num_input_tensors = kwargs.pop("num_input_tensors") if num_input_tensors_specified else foreach_num_tensors
        assert isinstance(num_input_tensors, list)
        _foreach_inputs_kwargs = {k: kwargs.pop(k, v) for k, v in _foreach_inputs_default_kwargs.items()}
        _foreach_inputs_kwargs["requires_grad"] = requires_grad
        _foreach_inputs_kwargs["zero_size"] = False
        allow_higher_dtype_scalars = kwargs.pop("allow_higher_dtype_scalars", False)

        # add empty tensor interspersion to test fully fixing #100701
        for num_tensors, rightmost_arg_type, intersperse_empty_tensors in itertools.product(
                num_input_tensors, self._rightmost_arg_types, self._intersperse_empty):
            if intersperse_empty_tensors and (num_tensors != max(num_input_tensors) or str(device) == 'cpu'):
                # generate interspersed empty tensors for only 1 N on non-cpu device to lessen redundancy
                continue
            _foreach_inputs_kwargs["intersperse_empty_tensors"] = intersperse_empty_tensors
            input = sample_inputs_foreach(
                None, device, dtype, num_tensors, **_foreach_inputs_kwargs)
            args = []
            if self.arity > 1:
                args = [
                    sample_inputs_foreach(
                        None, device, dtype, num_tensors, **_foreach_inputs_kwargs)
                    for _ in range(self.arity - 2)
                ]
                rightmost_arg_list = self._sample_rightmost_arg(
                    opinfo, rightmost_arg_type, device, dtype, num_tensors, allow_higher_dtype_scalars,
                    **_foreach_inputs_kwargs)
                for rightmost_arg in rightmost_arg_list:
                    args.append(rightmost_arg)
                    kwargs = self._sample_kwargs(opinfo, rightmost_arg, rightmost_arg_type, dtype)
                    ref_args = args
                    if rightmost_arg_type in (ForeachRightmostArgType.Scalar, ForeachRightmostArgType.Tensor):
                        # broadcast the single scalar/tensor into a per-tensor list for the reference
                        ref_args = args[:-1] + [[args[-1] for _ in range(num_tensors)]]
                    sample = ForeachSampleInput(input, *args, ref_args=ref_args, **kwargs)
                    yield sample
                    # undo the append so the next rightmost_arg reuses the same prefix
                    args.pop()
            else:
                yield ForeachSampleInput(
                    input,
                    *args,
                    disable_fastpath=self._should_disable_fastpath(opinfo, None, None, dtype),
                )


class foreach_max_sample_func(foreach_inputs_sample_func):
    def
__init__( + self, + arity: int, + rightmost_supports_scalar: bool, + rightmost_supports_scalarlist: bool, + rightmost_supports_tensor: bool = False, + ) -> None: + super().__init__(arity, rightmost_supports_scalar, rightmost_supports_scalarlist, rightmost_supports_tensor) + self._intersperse_empty = (False,) + + def sample_zero_size_tensor_inputs(self, opinfo, device, dtype, requires_grad, **kwargs): + return [] + + def _should_disable_fastpath(self, opinfo, rightmost_arg, rightmost_arg_type, dtype): + return False + + +class foreach_norm_sample_func(foreach_inputs_sample_func): + def sample_zero_size_tensor_inputs(self, opinfo, device, dtype, requires_grad, **kwargs): + assert "num_input_tensors" not in kwargs + _foreach_inputs_kwargs = {k: kwargs.pop(k, v) for k, v in _foreach_inputs_default_kwargs.items()} + _foreach_inputs_kwargs["requires_grad"] = requires_grad + for ord in (0, 1, 2, -1, -2, float('inf'), float('-inf')): + input = sample_inputs_foreach(None, device, dtype, NUM_SIZE0_TENSORS, zero_size=True, **_foreach_inputs_kwargs) + disable_fastpath = True + if ord in (1, 2, float('inf')) and dtype in floating_types_and(torch.half, torch.bfloat16): + disable_fastpath = False + yield ForeachSampleInput(input, ord=ord, disable_fastpath=disable_fastpath) + + def __call__(self, opinfo, device, dtype, requires_grad, **kwargs): + num_input_tensors = kwargs.pop("num_input_tensors", foreach_num_tensors) + assert isinstance(num_input_tensors, list) + _foreach_inputs_kwargs = {k: kwargs.pop(k, v) for k, v in _foreach_inputs_default_kwargs.items()} + _foreach_inputs_kwargs["requires_grad"] = requires_grad + _allow_higher_dtype_scalars = kwargs.pop("allow_higher_dtype_scalars", False) + + for num_tensors, ord, out_dtype in product( + num_input_tensors, + (0, 1, 2, -1, -2, float('inf'), float('-inf')), + (None,) + (torch.complex128,) if dtype in complex_types() else (torch.float64,), + ): + input = sample_inputs_foreach(None, device, dtype, num_tensors, zero_size=False, 
**_foreach_inputs_kwargs) + disable_fastpath = True + if ord in (1, 2, float('inf')) and dtype in floating_types_and(torch.half, torch.bfloat16): + disable_fastpath = False + yield ForeachSampleInput(input, ord=ord, disable_fastpath=disable_fastpath, dtype=out_dtype) + + # Also test nan propagation with a single tensor, but skip autograd testing + if not requires_grad: + nan_inputs = [ + [float('nan')], + [float('nan'), 1.0], + [1.0, float('nan')], + [1.0, 2.0, 3.0, float('nan'), float('nan'), 7.0, float('nan'), float('nan'), -1.5, 6.0], + [7.0, 3.0, float('nan'), float('nan'), -1.5, 6.0], + [3.0, float('nan'), float('nan'), -1.5, 6.0], + ] + for input in nan_inputs: + x = torch.tensor(input, device=device) + disable_fastpath = True + if ord in (1, 2, float('inf')) and dtype in floating_types_and(torch.half, torch.bfloat16): + disable_fastpath = False + yield ForeachSampleInput([x], ord=ord, disable_fastpath=disable_fastpath) + + +class foreach_pointwise_sample_func(foreach_inputs_sample_func): + + def __init__( + self, + arity: int = 3, + rightmost_supports_scalar: bool = False, + rightmost_supports_scalarlist: bool = False, + ): + super().__init__(arity, rightmost_supports_scalar, rightmost_supports_scalarlist) + + def _should_disable_fastpath(self, opinfo, rightmost_arg, rightmost_arg_type, dtype): + return dtype in integral_types_and(torch.bool) and opinfo.ref in (torch.addcmul,) + + def sample_zero_size_tensor_inputs(self, opinfo, device, dtype, requires_grad, **kwargs): + assert "num_input_tensors" not in kwargs + _foreach_inputs_kwargs = {k: kwargs.pop(k, v) for k, v in _foreach_inputs_default_kwargs.items()} + _foreach_inputs_kwargs["requires_grad"] = requires_grad + # zero_size tensor + input = sample_inputs_foreach(None, device, dtype, NUM_SIZE0_TENSORS, zero_size=True, **_foreach_inputs_kwargs) + args = [ + sample_inputs_foreach(None, device, dtype, NUM_SIZE0_TENSORS, zero_size=True, **_foreach_inputs_kwargs) + for _ in range(2) + ] + if "scalars" in 
kwargs: + del kwargs["scalars"] + kwargs.update(self._sample_kwargs(opinfo, args[-1], ForeachRightmostArgType.TensorList, dtype)) + yield ForeachSampleInput(input, *args, **kwargs) + + def __call__(self, opinfo, device, dtype, requires_grad, **kwargs): + num_input_tensors_specified = "num_input_tensors" in kwargs + num_input_tensors = kwargs.pop("num_input_tensors") if num_input_tensors_specified else foreach_num_tensors + assert isinstance(num_input_tensors, list) + _foreach_inputs_kwargs = {k: kwargs.pop(k, v) for k, v in _foreach_inputs_default_kwargs.items()} + _foreach_inputs_kwargs["requires_grad"] = requires_grad + allow_higher_dtype_scalars = kwargs.pop("allow_higher_dtype_scalars", False) + + for num_tensors, rightmost_arg_type in itertools.product(num_input_tensors, self._rightmost_arg_types): + input = sample_inputs_foreach(None, device, dtype, num_tensors, zero_size=False, **_foreach_inputs_kwargs) + args = [ + sample_inputs_foreach(None, device, dtype, num_tensors, zero_size=False, **_foreach_inputs_kwargs) + for _ in range(2 - int(rightmost_arg_type == ForeachRightmostArgType.TensorList)) + ] + rightmost_arg_list = self._sample_rightmost_arg( + opinfo, + rightmost_arg_type, + device, + dtype, + num_tensors, + zero_size=False, + allow_higher_dtype_scalars=allow_higher_dtype_scalars, + **_foreach_inputs_kwargs, + ) + for rightmost_arg in rightmost_arg_list: + kwargs = {} + if rightmost_arg_type == ForeachRightmostArgType.TensorList: + args.append(rightmost_arg) + elif rightmost_arg_type in [ForeachRightmostArgType.Tensor, ForeachRightmostArgType.ScalarList]: + kwargs["scalars"] = rightmost_arg + else: + kwargs["value"] = rightmost_arg + kwargs.update(self._sample_kwargs(opinfo, rightmost_arg, rightmost_arg_type, dtype)) + assert len(args) == 2, f"{len(args)=}" + sample = ForeachSampleInput(input, *args, **kwargs) + yield sample + if rightmost_arg_type == ForeachRightmostArgType.TensorList: + args.pop() + + +foreach_unary_op_db: List[OpInfo] = [ + 
ForeachFuncInfo( + 'exp', + sample_inputs_func=foreach_inputs_sample_func(1, False, False), + backward_requires_result=True, + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + ), + ), + ForeachFuncInfo( + 'acos', + sample_inputs_func=foreach_inputs_sample_func(1, False, False), + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + ), + ), + ForeachFuncInfo( + 'asin', + sample_inputs_func=foreach_inputs_sample_func(1, False, False), + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + ), + ), + ForeachFuncInfo( + 'atan', + sample_inputs_func=foreach_inputs_sample_func(1, False, False), + 
supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + ), + ), + ForeachFuncInfo( + 'cos', + sample_inputs_func=foreach_inputs_sample_func(1, False, False), + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + ), + ), + ForeachFuncInfo( + 'cosh', + sample_inputs_func=foreach_inputs_sample_func(1, False, False), + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + ), + ), + ForeachFuncInfo( + 'log', + sample_inputs_func=foreach_inputs_sample_func(1, False, False), + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + DecorateInfo( + unittest.expectedFailure, 
+ "TestMeta", + "test_dispatch_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + ), + ), + ForeachFuncInfo( + 'log10', + sample_inputs_func=foreach_inputs_sample_func(1, False, False), + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + ), + ), + ForeachFuncInfo( + 'log2', + sample_inputs_func=foreach_inputs_sample_func(1, False, False), + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + ), + ), + ForeachFuncInfo( + 'tan', + sample_inputs_func=foreach_inputs_sample_func(1, False, False), + backward_requires_result=True, + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + # due to https://github.com/pytorch/pytorch/pull/102427 enabling jiterator for complex + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + 
"test_dispatch_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + DecorateInfo( + toleranceOverride( + { + torch.complex64: tol(atol=3e-04, rtol=2e-05) + } + ), + 'TestForeach', + 'test_parity', + device_type='cuda' + ), + ), + ), + ForeachFuncInfo( + 'tanh', + sample_inputs_func=foreach_inputs_sample_func(1, False, False), + backward_requires_result=True, + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + DecorateInfo( + toleranceOverride( + {torch.complex64: tol(atol=5e-03, rtol=1e-04)} + ), + 'TestForeach', + 'test_parity', + device_type='cuda' + ), + ), + ), + ForeachFuncInfo( + 'sin', + sample_inputs_func=foreach_inputs_sample_func(1, False, False), + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_inplace", + dtypes=integral_types_and(torch.bool,), + ), + ), + ), + ForeachFuncInfo( + 'sinh', + 
sample_inputs_func=foreach_inputs_sample_func(1, False, False), + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_inplace", + dtypes=integral_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_inplace", + dtypes=integral_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_inplace", + dtypes=integral_types_and(torch.bool), + ), + ), + ), + ForeachFuncInfo( + 'neg', + sample_inputs_func=foreach_inputs_sample_func(1, False, False), + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_inplace", + dtypes=(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_outplace", + dtypes=(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_inplace", + dtypes=(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_outplace", + dtypes=(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_inplace", + dtypes=(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_outplace", + dtypes=(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestForeach", + "test_unary_op_tensors_on_different_devices", + device_type="cuda", + dtypes=(torch.bool,), + ), + ), + ), + ForeachFuncInfo( + 'sqrt', + sample_inputs_func=foreach_inputs_sample_func(1, False, False), + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + backward_requires_result=True, + decorators=( + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_inplace", + 
dtypes=integral_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_inplace", + dtypes=integral_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_inplace", + dtypes=integral_types_and(torch.bool), + ), + ), + ), + ForeachFuncInfo( + 'ceil', + sample_inputs_func=foreach_inputs_sample_func(1, False, False), + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_inplace", + dtypes=complex_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_outplace", + dtypes=complex_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_inplace", + dtypes=complex_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_outplace", + dtypes=complex_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_inplace", + dtypes=complex_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_outplace", + dtypes=complex_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestForeach", + "test_autodiff", + device_type="cuda", + dtypes=(torch.complex128,), + ), + ), + ), + ForeachFuncInfo( + 'erf', + sample_inputs_func=foreach_inputs_sample_func(1, False, False), + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_inplace", + dtypes=integral_types_and(torch.bool) + complex_types(), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_outplace", + dtypes=complex_types(), + ), + DecorateInfo( + 
unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_inplace", + dtypes=integral_types_and(torch.bool) + complex_types(), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_outplace", + dtypes=complex_types(), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_inplace", + dtypes=integral_types_and(torch.bool) + complex_types(), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_outplace", + dtypes=complex_types(), + ), + DecorateInfo( + unittest.expectedFailure, + "TestForeach", + "test_autodiff", + device_type="cuda", + dtypes=(torch.complex128,), + ), + ), + ), + ForeachFuncInfo( + 'erfc', + sample_inputs_func=foreach_inputs_sample_func(1, False, False), + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_inplace", + dtypes=integral_types_and(torch.bool) + complex_types(), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_outplace", + dtypes=complex_types(), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_inplace", + dtypes=integral_types_and(torch.bool) + complex_types(), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_outplace", + dtypes=complex_types(), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_inplace", + dtypes=integral_types_and(torch.bool) + complex_types(), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_outplace", + dtypes=complex_types(), + ), + DecorateInfo( + unittest.expectedFailure, + "TestForeach", + "test_autodiff", + device_type="cuda", + dtypes=(torch.complex128,), + ), + ), + ), + ForeachFuncInfo( + 'expm1', + sample_inputs_func=foreach_inputs_sample_func(1, False, False), + supports_autograd=True, + 
supports_inplace_autograd=True, + supports_forward_ad=True, + backward_requires_result=True, + decorators=( + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_inplace", + dtypes=integral_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_inplace", + dtypes=integral_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_inplace", + dtypes=integral_types_and(torch.bool), + ), + ), + ), + ForeachFuncInfo( + 'floor', + sample_inputs_func=foreach_inputs_sample_func(1, False, False), + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_inplace", + dtypes=complex_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_outplace", + dtypes=complex_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_inplace", + dtypes=complex_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_outplace", + dtypes=complex_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_inplace", + dtypes=complex_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_outplace", + dtypes=complex_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestForeach", + "test_autodiff", + device_type="cuda", + dtypes=(torch.complex128,), + ), + ), + ), + ForeachFuncInfo( + 'log1p', + sample_inputs_func=foreach_inputs_sample_func(1, False, False), + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_inplace", + 
dtypes=integral_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_inplace", + dtypes=integral_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_inplace", + dtypes=integral_types_and(torch.bool), + ), + ), + ), + ForeachFuncInfo( + 'round', + sample_inputs_func=foreach_inputs_sample_func(1, False, False), + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_inplace", + dtypes=complex_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_outplace", + dtypes=complex_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_inplace", + dtypes=complex_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_outplace", + dtypes=complex_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_inplace", + dtypes=complex_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_outplace", + dtypes=complex_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestForeach", + "test_autodiff", + device_type="cuda", + dtypes=(torch.complex128,), + ), + ), + ), + ForeachFuncInfo( + 'frac', + sample_inputs_func=foreach_inputs_sample_func(1, False, False), + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_inplace", + dtypes=integral_types_and(torch.bool) + complex_types(), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_outplace", + dtypes=integral_types_and(torch.bool) + complex_types(), + ), + 
DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_inplace", + dtypes=integral_types_and(torch.bool) + complex_types(), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_outplace", + dtypes=integral_types_and(torch.bool) + complex_types(), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_inplace", + dtypes=integral_types_and(torch.bool) + complex_types(), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_outplace", + dtypes=integral_types_and(torch.bool) + complex_types(), + ), + DecorateInfo( + unittest.expectedFailure, + "TestForeach", + "test_autodiff", + device_type="cuda", + dtypes=(torch.complex128,), + ), + ), + ), + ForeachFuncInfo( + 'reciprocal', + sample_inputs_func=foreach_inputs_sample_func(1, False, False), + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + backward_requires_result=True, + decorators=( + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_inplace", + dtypes=integral_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_inplace", + dtypes=integral_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_inplace", + dtypes=integral_types_and(torch.bool), + ), + ), + ), + ForeachFuncInfo( + 'sigmoid', + sample_inputs_func=foreach_inputs_sample_func(1, False, False), + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + backward_requires_result=True, + decorators=( + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_inplace", + dtypes=integral_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_inplace", + dtypes=integral_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + 
"test_meta_inplace", + dtypes=integral_types_and(torch.bool), + ), + ), + ), + ForeachFuncInfo( + 'trunc', + sample_inputs_func=foreach_inputs_sample_func(1, False, False), + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_inplace", + dtypes=complex_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_outplace", + dtypes=complex_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_inplace", + dtypes=complex_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_outplace", + dtypes=complex_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_inplace", + dtypes=complex_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_outplace", + dtypes=complex_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestForeach", + "test_autodiff", + device_type="cuda", + dtypes=(torch.complex128,), + ), + ), + ), + ForeachFuncInfo( + 'abs', + sample_inputs_func=foreach_inputs_sample_func(1, False, False), + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=( + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_inplace", + dtypes=complex_types(), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_inplace", + dtypes=complex_types(), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_outplace", + device_type="cpu", + dtypes=(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_inplace", + device_type="cpu", + dtypes=(torch.bool,), 
+ ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_outplace", + device_type="cpu", + dtypes=(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_inplace", + device_type="cpu", + dtypes=(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_outplace", + device_type="cpu", + dtypes=(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_inplace", + device_type="cpu", + dtypes=(torch.bool,), + ), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace", dtypes=complex_types()), + ), + ), + ForeachFuncInfo( + 'zero', + sample_inputs_func=foreach_inputs_sample_func(1, False, False), + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + supports_out=False, + ), + ForeachFuncInfo( + 'sign', + sample_inputs_func=foreach_inputs_sample_func(1, False, False), + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_inplace", + dtypes=complex_types(), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_outplace", + dtypes=complex_types(), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_inplace", + dtypes=complex_types(), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_outplace", + dtypes=complex_types(), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_inplace", + dtypes=complex_types(), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_outplace", + dtypes=complex_types(), + ), + DecorateInfo( + unittest.expectedFailure, + "TestForeach", + "test_autodiff", + device_type="cuda", + dtypes=(torch.complex128,), + ), + ), + ), + ForeachFuncInfo( + 'lgamma', + 
sample_inputs_func=foreach_inputs_sample_func(1, False, False), + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + DecorateInfo(unittest.skip("In-place lgamma not supported for integral tensors"), "TestMeta", + "test_dispatch_symbolic_meta_inplace", dtypes=integral_types_and(torch.bool)), + # DecorateInfo(unittest.skip("In-place lgamma not supported for integral tensors"), "TestMeta", + # "test_dispatch_meta_inplace", dtypes=integral_types_and(torch.bool)), + DecorateInfo(unittest.skip("In-place lgamma not supported for integral tensors"), "TestMeta", + "test_meta_inplace", dtypes=integral_types_and(torch.bool)), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_inplace", + dtypes=complex_types() + integral_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_outplace", + dtypes=complex_types(), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_inplace", + dtypes=complex_types() + integral_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_outplace", + dtypes=complex_types(), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_inplace", + dtypes=complex_types() + integral_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_outplace", + dtypes=complex_types(), + ), + DecorateInfo( + unittest.expectedFailure, + "TestForeach", + "test_autodiff", + device_type="cuda", + dtypes=(torch.complex128,), + ), + ), + ), +] + +foreach_binary_op_db: List[OpInfo] = [ + ForeachFuncInfo( + "add", + sample_inputs_func=foreach_inputs_sample_func(2, True, True, True), + supports_alpha_param=True, + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + # These tests fail with aten._local_scalar_dense not being implemented. 
+ DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace", + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16)), + # Samples have complex types and inplace only works if the dtype is complex. + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace", + dtypes=(torch.bool,)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace", + dtypes=(torch.bool,)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides", + dtypes=integral_types() + complex_types_and(torch.bool, torch.bfloat16, torch.float16, torch.float64)), + ), + ), + ForeachFuncInfo( + "sub", + sample_inputs_func=foreach_inputs_sample_func(2, True, True), + supports_alpha_param=True, + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides"), + ), + ), + ForeachFuncInfo( + "mul", + sample_inputs_func=foreach_inputs_sample_func(2, True, True, True), + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + # Samples have complex types and inplace only works if the dtype is complex. 
+ DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace", + dtypes=(torch.bool,)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace", + dtypes=(torch.bool,)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace", dtypes=(torch.bool,)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides", + dtypes=(torch.bool,)), + ), + ), + ForeachFuncInfo( + "div", + sample_inputs_func=foreach_inputs_sample_func(2, True, True, True), + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + # Samples have complex types and inplace only works if the dtype is complex. + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace", + dtypes=integral_types_and(torch.bool)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace", + dtypes=integral_types_and(torch.bool)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace", + dtypes=integral_types_and(torch.bool)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides", + dtypes=integral_types_and(torch.bool)), + ), + ), + ForeachFuncInfo( + "clamp_min", + sample_inputs_func=foreach_inputs_sample_func(2, True, True), + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace", + dtypes=complex_types_and(torch.bool)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace", + dtypes=complex_types_and(torch.bool)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace", + dtypes=complex_types_and(torch.bool)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace", + dtypes=complex_types_and(torch.bool)), + 
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace", + dtypes=complex_types_and(torch.bool)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace", + dtypes=complex_types_and(torch.bool)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides", + dtypes=complex_types_and(torch.bool)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides", + dtypes=complex_types_and(torch.bool)), + DecorateInfo( + unittest.expectedFailure, + "TestForeach", + "test_autodiff", + device_type="cuda", + dtypes=(torch.complex128,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestForeach", + "test_binary_op_scalar_with_overlapping_tensors", + dtypes=complex_types(), + ), + ), + ), + ForeachFuncInfo( + "clamp_max", + sample_inputs_func=foreach_inputs_sample_func(2, True, True), + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace", + dtypes=complex_types_and(torch.bool)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace", + dtypes=complex_types_and(torch.bool)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace", + dtypes=complex_types_and(torch.bool)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace", + dtypes=complex_types_and(torch.bool)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace", + dtypes=complex_types_and(torch.bool)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace", + dtypes=complex_types_and(torch.bool)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides", + dtypes=complex_types_and(torch.bool)), + DecorateInfo(unittest.expectedFailure, "TestMeta", 
"test_dispatch_symbolic_meta_outplace_all_strides", + dtypes=complex_types_and(torch.bool)), + DecorateInfo( + unittest.expectedFailure, + "TestForeach", + "test_autodiff", + device_type="cuda", + dtypes=(torch.complex128,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestForeach", + "test_binary_op_scalar_with_overlapping_tensors", + dtypes=complex_types(), + ), + ), + ), + # note(crcrpar): forward ad not implemented. + ForeachFuncInfo( + "minimum", + sample_inputs_func=foreach_inputs_sample_func(2, True, True), + supports_autograd=True, + supports_inplace_autograd=False, + supports_forward_ad=False, + decorators=( + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace", + dtypes=complex_types_and(torch.bool)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace", + dtypes=complex_types_and(torch.bool)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace", + dtypes=complex_types_and(torch.bool)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace", + dtypes=complex_types_and(torch.bool)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace", + dtypes=complex_types_and(torch.bool)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace", + dtypes=complex_types_and(torch.bool)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides", + dtypes=complex_types_and(torch.bool)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides", + dtypes=complex_types_and(torch.bool)), + DecorateInfo( + unittest.expectedFailure, + "TestForeach", + "test_autodiff", + device_type="cuda", + dtypes=(torch.complex128,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestForeach", + "test_binary_op_scalar_with_overlapping_tensors", + dtypes=complex_types(), + ), + ), + ), + # note(crcrpar): forward ad not 
implemented. + ForeachFuncInfo( + "maximum", + sample_inputs_func=foreach_inputs_sample_func(2, True, True), + supports_autograd=True, + supports_forward_ad=False, + supports_inplace_autograd=False, + decorators=( + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace", + dtypes=complex_types_and(torch.bool)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace", + dtypes=complex_types_and(torch.bool)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace", + dtypes=complex_types_and(torch.bool)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace", + dtypes=complex_types_and(torch.bool)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace", + dtypes=complex_types_and(torch.bool)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace", + dtypes=complex_types_and(torch.bool)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides", + dtypes=complex_types_and(torch.bool)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides", + dtypes=complex_types_and(torch.bool)), + DecorateInfo( + unittest.expectedFailure, + "TestForeach", + "test_autodiff", + device_type="cuda", + dtypes=(torch.complex128,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestForeach", + "test_binary_op_scalar_with_overlapping_tensors", + dtypes=complex_types(), + ), + ), + ), + ForeachFuncInfo( + "pow", + supports_alpha_param=False, + supports_scalar_self_arg=True, + sample_inputs_func=foreach_inputs_sample_func(2, True, True), + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace", dtypes=(torch.bool,)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace", 
+ dtypes=(torch.bool,)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace", dtypes=(torch.bool,)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace", dtypes=(torch.bool,)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace", + dtypes=(torch.bool,)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace", + dtypes=(torch.bool,)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace", + dtypes=(torch.bool,)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace", + dtypes=(torch.bool,),), + DecorateInfo(unittest.skip("flaky"), "TestForeach", "test_parity", device_type="cpu", dtypes=(torch.complex64,)), + DecorateInfo( + unittest.skip("failed starting on ROCm 6.2"), + "TestForeach", + "test_parity", + device_type="cuda", + dtypes=(torch.complex64,), + active_if=TEST_WITH_ROCM), + DecorateInfo( + unittest.expectedFailure, + "TestForeach", + "test_binary_op_with_scalar_self_support", + device_type="cuda", + dtypes=(torch.bool,), + active_if=lambda kwargs: kwargs["is_fastpath"], + ), + ), + backward_requires_result=True, + ), + ForeachFuncInfo( + "copy", + sample_inputs_func=foreach_inputs_sample_func(2, False, False), + supports_out=False, + supports_forward_ad=False, + supports_autograd=False, + supports_inplace_autograd=False, + ) +] + +foreach_pointwise_op_db: List[ForeachFuncInfo] = [ + ForeachFuncInfo( + "addcmul", + sample_inputs_func=foreach_pointwise_sample_func(4, True, True), + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace", dtypes=(torch.bool,)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace", + dtypes=(torch.bool,)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace", 
dtypes=(torch.bool,)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides", + dtypes=(torch.bool,)), + # # Samples have complex types and inplace only works if the dtype is complex. + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace", dtypes=(torch.bool,)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace", + dtypes=(torch.bool,)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace", dtypes=(torch.bool,)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides", + dtypes=integral_types() + complex_types_and(torch.bool)), + ), + ), + ForeachFuncInfo( + "addcdiv", + sample_inputs_func=foreach_pointwise_sample_func(4, True, True), + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + # Samples have complex types and inplace only works if the dtype is complex. 
+ DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace", + dtypes=integral_types_and(torch.bool)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace", + dtypes=integral_types_and(torch.bool)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace", + dtypes=integral_types_and(torch.bool)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides", + dtypes=integral_types() + complex_types_and(torch.bool)), + # fails with div_cpu is not implemented with ComplexHalf + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace", + dtypes=integral_types_and(torch.bool)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace", + dtypes=integral_types_and(torch.bool)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace", + dtypes=integral_types_and(torch.bool)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides", + dtypes=integral_types() + complex_types_and(torch.bool)), + ), + ), +] + +foreach_reduce_op_db: List[ForeachFuncInfo] = [ + ForeachFuncInfo( + "max", + sample_inputs_func=foreach_max_sample_func(1, False, False), + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + # no complex support for ordering ops like max + DecorateInfo( + unittest.expectedFailure, + "TestForeach", + "test_autodiff", + dtypes=(torch.complex128, torch.complex64), + ), + DecorateInfo( + unittest.expectedFailure, + "TestForeach", + "test_foreach_reduce_large_input", + dtypes=(torch.complex128, torch.complex64), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_outplace", + dtypes=(torch.complex128, torch.complex64), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_outplace", + dtypes=(torch.complex128, 
torch.complex64), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_outplace", + dtypes=(torch.complex128, torch.complex64), + ), + ), + ), + ForeachFuncInfo( + "norm", + sample_inputs_func=foreach_norm_sample_func(1, False, False), + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace"), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_outplace", + dtypes=integral_types_and(torch.bool), + ), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace"), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_outplace", + dtypes=integral_types_and(torch.bool), + ), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace"), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_meta_outplace", + dtypes=integral_types_and(torch.bool), + ), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides"), + DecorateInfo( + unittest.expectedFailure, + "TestForeach", + "test_foreach_reduce_large_input", + device_type="cuda", + dtypes=integral_types_and(torch.bool), + ), + ), + ), +] + +foreach_other_op_db: List[ForeachFuncInfo] = [ + ForeachFuncInfo( + "lerp", + sample_inputs_func=foreach_inputs_sample_func(3, True, False), + supports_autograd=True, + supports_inplace_autograd=True, + supports_forward_ad=True, + decorators=( + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_inplace", + dtypes=integral_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_meta_outplace", + dtypes=integral_types_and(torch.bool), + ), + DecorateInfo( + unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_outplace", + dtypes=integral_types_and(torch.bool), + ), + DecorateInfo( 
+ unittest.expectedFailure, + "TestMeta", + "test_dispatch_symbolic_meta_inplace", + dtypes=integral_types_and(torch.bool), + ), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace", dtypes=integral_types_and(torch.bool)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace", dtypes=integral_types_and(torch.bool)), + ), + ), +] + +def reference_sign(x): + if x.dtype == np.bool_: + # `np.sign` doesn't support `bool`. + # >>> np.sign(True) + # ufunc 'sign' did not contain a loop + # with signature matching types dtype('bool') -> dtype('bool') + return np.sign(x, dtype=np.uint8).astype(np.bool_) + return np.sign(x) + + +def reference_sgn(x): + # NumPy doesn't have an equivalent to `torch.sgn` when the dtype is complex. + # For complex inputs, `np.sign` returns sign(x.real) + 0j if x.real != 0 else sign(x.imag) + 0j. + # while `torch.sgn` returns, 0 if abs(input) == 0 else input/abs(input) + if x.dtype not in [np.complex64, np.complex128]: + return reference_sign(x) + + out = (x / np.abs(x)) + if out.ndim == 0: + # Handle x == 0 case + if (x == 0): + # Can't assign to np.complex object + # So make a new one. + return np.array(complex(0, 0), dtype=x.dtype) + return out + + # Handle x == 0 case + mask = (x == 0) + out[mask] = complex(0, 0) + return out + + +def reference_sigmoid(x): + # 'scipy.special.expit' not supported for the input types + if x.dtype in [np.complex64, np.complex128]: + return (1 / (1 + np.exp(-x))) + return scipy.special.expit(x) + + +def reference_logsigmoid(x): + return np.where( + x < 0, + x - np.log1p(np.exp(x)), + -np.log1p(np.exp(-x))) + + +def reference_hardsigmoid(x): + intermediate = x / 6 + 0.5 + y = np.clip(intermediate, 0, None) + return np.where(y > 1, 1, y).astype(x.dtype) + + +def reference_lgamma(x): + # scipy.special.gammaln returns `-inf` when input is `-inf`. + # While Pytorch, C and C++, all return `inf` when input is `-inf`. 
+ # Reference: + # https://en.cppreference.com/w/cpp/numeric/math/lgamma + # https://en.cppreference.com/w/c/numeric/math/lgamma + + # To handle the above discrepancy, + # we replace -inf with inf so values + # that were originally -inf map to inf as expected + if x.dtype.kind == 'f': + x = np.where(x == float('-inf'), np.array(float('inf'), dtype=x.dtype), x) + + out = scipy.special.gammaln(x) + + if x.dtype == np.float16: + # `scipy.special.gammaln` returns output of float32 when input is float16, + # while `torch.lgamma` preserves `float16`. But due to smaller range of float16, + # Pytorch version outputs `inf` while SciPy returns finite values. + out = out.astype(np.float16) + + return out + + +def reference_mvlgamma(x, d): + if x.dtype == np.float16: + return scipy.special.multigammaln(x, d).astype(np.float16) + + return scipy.special.multigammaln(x, d) + +def reference_softplus(input, beta=1, threshold=20): + non_linear = input * beta <= threshold + output = input.copy() + output[non_linear] = np.log(1 + np.exp(beta * input[non_linear])) / beta + return output + +def reference_gelu(X, *, approximate='none'): + def _gelu_ref(X): + return X * stats.norm.cdf(X) + + def _tanh_gelu_ref(X): + M_SQRT_2_PI = math.sqrt(2 / math.pi) + Z = M_SQRT_2_PI * (X + 0.044715 * np.power(X, 3.0)) + return 0.5 * X * (1.0 + np.tanh(Z)) + + if approximate == 'tanh': + return _tanh_gelu_ref(X) + else: + return _gelu_ref(X) + + +def reference_one_hot(a: np.ndarray, num_classes: int = -1) -> np.ndarray: + if num_classes == -1: + num_classes = int(np.amax(a) + 1) + + idcs = a.reshape(-1) + np.arange(0, a.size, dtype=np.int64) * num_classes + one_hot = np.zeros((a.size, num_classes), dtype=a.dtype) + np.put(one_hot, idcs, 1) + return one_hot.reshape(*a.shape, -1) + + +def reference_mse_loss(input, target, reduction="mean"): + se = (input - target) ** 2 + if reduction == "mean": + return np.mean(se) + elif reduction == "sum": + return np.sum(se) + else: # reduction == "none" + return se + 

def reference_layer_norm(inp: np.ndarray, normalized_shape: Tuple[int], weight=None, bias=None, eps=1e-5):
    """NumPy reference for torch.nn.functional.layer_norm (normalized output only)."""
    return reference_native_layer_norm(inp, normalized_shape, weight, bias, eps)[0]


def reference_native_layer_norm(inp: np.ndarray, normalized_shape: Tuple[int], weight, bias, eps):
    """NumPy reference for torch.native_layer_norm.

    Returns (normalized output, mean, rstd), with mean/rstd reshaped so the
    leading (non-normalized) dims are kept and the normalized dims are size-1.
    """
    feature_size = np.prod(normalized_shape)
    inp_view = inp.reshape(-1, feature_size)  # type: ignore[call-overload]
    mean = inp_view.mean(axis=-1, keepdims=True)
    var = inp_view.var(axis=-1, ddof=0, keepdims=True)
    Y = (inp_view - mean) / np.sqrt(var + eps)
    if weight is None and bias is not None:
        Y = Y + bias.reshape(-1)
    elif weight is not None and bias is None:
        Y = Y * weight.reshape(-1)
    elif weight is not None and bias is not None:
        Y = Y * weight.reshape(-1) + bias.reshape(-1)
    axis = inp.ndim - len(normalized_shape)
    stat_shape = inp.shape[:axis] + (1,) * len(normalized_shape)
    return Y.reshape(*inp.shape), mean.reshape(stat_shape), (1.0 / np.sqrt(var + eps)).reshape(stat_shape)


def reference_rms_norm(inp: np.ndarray, normalized_shape: Tuple[int], weight=None, eps=None):
    """NumPy reference for torch.nn.functional.rms_norm.

    When eps is None it defaults to the torch eps of the matching dtype
    (requires the module-level numpy_to_torch_dtype helper).
    """
    if eps is None:
        eps = torch.finfo(numpy_to_torch_dtype(inp.dtype)).eps
    feature_size = np.prod(normalized_shape)
    inp_view = inp.reshape(-1, feature_size)  # type: ignore[call-overload]
    rms = np.sqrt((inp_view**2).mean(axis=-1, keepdims=True) + eps)
    Y = inp_view / rms
    if weight is not None:
        Y = Y * weight.reshape(-1)
    return Y.reshape(*inp.shape)


def reference_group_norm(inp: np.ndarray, num_groups: int, weight=None, bias=None, eps=1e-5):
    """NumPy reference for torch.nn.functional.group_norm."""
    inp_view = inp
    # Empty tensors cannot be reshaped into (N, G, -1); normalize them as-is.
    if np.prod(inp.shape) != 0:
        inp_view = inp.reshape((inp.shape[0], num_groups, -1))
    mean = inp_view.mean(axis=-1, keepdims=True)
    var = inp_view.var(axis=-1, ddof=0, keepdims=True)
    Y = (inp_view - mean) / np.sqrt(var + eps)
    Y = Y.reshape(inp.shape)
    if weight is not None:
        # weight is a vector of length equal to the channel
        if len(Y.shape) > 2:
            weight = np.expand_dims(weight, [0] + [idx + 2 for idx in range(inp.ndim - 2)])
        Y = Y * weight
    if bias is not None:
        # bias is a vector of length equal to the channel
        if len(Y.shape) > 2:
            bias = np.expand_dims(bias, [0] + [idx + 2 for idx in range(inp.ndim - 2)])
        Y = Y + bias
    return Y


# using a custom reference function since numpy only has a string side arg (instead of right and side) and doesn't
# have an out_int32 arg. Additionally, numpy doesn't support searchsorted with ND arrays, so this splits those into
# stacked 1D cases
def reference_searchsorted(sorted_sequence, boundary, out_int32=False, right=False, side='left', sorter=None):
    """NumPy reference for torch.searchsorted supporting right/side, out_int32 and ND inputs."""
    side = 'right' if (right or side == 'right') else 'left'
    if len(sorted_sequence.shape) == 1 :
        ret = np.searchsorted(sorted_sequence, boundary, side=side, sorter=sorter)
        return ret.astype(np.int32) if out_int32 else ret
    elif sorted_sequence.shape[0] == 0:
        if sorter is not None:
            sorter = sorter.flatten()
        ret = np.searchsorted(sorted_sequence.flatten(), boundary.flatten(), side=side, sorter=sorter)
        ret = ret.astype(np.int32) if out_int32 else ret
        return ret.reshape(boundary.shape)
    else:
        # numpy searchsorted only supports 1D inputs so we split up ND inputs
        orig_shape = boundary.shape
        num_splits = np.prod(sorted_sequence.shape[:-1])
        splits = range(0, num_splits)
        sorted_sequence, boundary = sorted_sequence.reshape(num_splits, -1), boundary.reshape(num_splits, -1)
        if sorter is not None:
            sorter = sorter.reshape(num_splits, -1)

        split_sequence = [sorted_sequence[i] for i in splits]
        split_boundary = [boundary[i] for i in splits]
        split_sorter = [sorter[i] if (sorter is not None) else None for i in splits]

        split_ret = [np.searchsorted(s_seq, b, side=side, sorter=s_sort)
                     for (s_seq, b, s_sort) in zip(split_sequence, split_boundary, split_sorter)]
        split_ret = [i.astype(np.int32) for i in split_ret] if out_int32 else split_ret
        return np.stack(split_ret).reshape(orig_shape)


def loss_reference_reduction_wrapper(fn):
    """Decorator applying torch-style mean/sum/none reduction to a per-element loss reference."""
    def wrapper(input, target, *, size_average=None, reduce=None, reduction="mean", **other_kwargs):
        if size_average is not None or reduce is not None:
            raise RuntimeError(
                "The keyword arguments 'size_average' and 'reduce' are deprecated and not supported by this wrapper"
            )
        output = fn(input, target, **other_kwargs)
        if reduction == "mean":
            return np.mean(output)
        elif reduction == "sum":
            return np.sum(output)
        else:  # reduction == "none"
            return output

    return wrapper


@loss_reference_reduction_wrapper
def reference_smooth_l1_loss(input, target, beta=1.0):
    """NumPy reference for torch.nn.functional.smooth_l1_loss (per-element, pre-reduction)."""
    diff = input - target
    abs_diff = np.abs(diff)
    above_threshold = abs_diff >= beta

    loss = np.empty_like(input)
    # |d| >= beta: linear regime; otherwise quadratic regime.
    loss[above_threshold] = abs_diff[above_threshold] - 0.5 * beta
    loss[~above_threshold] = diff[~above_threshold] ** 2 / (2 * beta)

    return loss


def reference_std_var(f):
    """Forwards unbiased/correction kwargs as NumPy's equivalent ddof"""
    g = reference_reduction_numpy(f)

    @wraps(g)
    def wrapper(x: np.ndarray, *args, **kwargs):
        assert not ('unbiased' in kwargs and 'correction' in kwargs)

        if 'unbiased' in kwargs:
            kwargs['ddof'] = int(kwargs.pop('unbiased'))
        elif 'correction' in kwargs:
            kwargs['ddof'] = kwargs.pop('correction')

        return g(x, *args, **kwargs)

    return wrapper


def generate_std_var_kwargs(t: torch.Tensor, **kwargs):
    """Generates unbiased/correction kwargs for std/var operators"""
    yield ((), {'unbiased': True})
    yield ((), {'unbiased': False})

    # Currently, calling std with correction is only enabled when
    # both dim and keepdim are provided.
    if 'dim' in kwargs and 'keepdim' in kwargs:
        yield ((), {'correction': 0})
        yield ((), {'correction': 1})

        numel = torch.tensor(t.shape)[kwargs.get('dim')].prod()
        yield ((), {'correction': numel // 2})


def error_inputs_mean(op_info, device, is_ref=False, **kwargs):
    """Yields ErrorInputs for mean(); the expected message differs between refs and eager."""
    if is_ref:
        err_msg1 = (r"mean\(\): could not infer output dtype. "
                    r"Input dtype must be either a floating point or complex dtype. "
                    r"Got: torch.int64")
    else:
        err_msg1 = (r"mean\(\): could not infer output dtype. "
                    r"Input dtype must be either a floating point or complex dtype. "
                    r"Got: Long")
    yield ErrorInput(
        SampleInput(make_tensor((3, 4, 5), dtype=torch.int64, device=device), []),
        error_regex=err_msg1,
    )

    if is_ref:
        err_msg2 = (r"mean\(\): could not infer output dtype. "
                    r"Optional dtype must be either a floating point or complex dtype. "
                    r"Got: torch.int64")
    else:
        err_msg2 = (r"mean\(\): could not infer output dtype. "
                    r"Optional dtype must be either a floating point or complex dtype. "
                    r"Got: Long")
    yield ErrorInput(
        SampleInput(
            make_tensor((3, 4, 5), dtype=torch.float32, device=device),
            [],
            dtype=torch.int64),
        error_regex=err_msg2
    )

    if is_ref:
        err_msg3 = "Expected out tensor to have dtype torch.float64, but got torch.float32 instead"
    else:
        err_msg3 = "Expected out tensor to have dtype double, but got float instead"
    yield ErrorInput(
        SampleInput(
            make_tensor((3, 4, 5), dtype=torch.int64, device=device),
            [],
            dtype=torch.float64,
            out=make_tensor([], dtype=torch.float32, device=device),
        ),
        error_regex=err_msg3
    )


# numpy implementation of torch.flatten
# unfortunately there's no np.flatten.
we figure out the desired shape and call np.reshape +def reference_flatten(input, start_dim=0, end_dim=-1): + in_shape = input.shape + in_rank = len(in_shape) + for d in start_dim, end_dim: + if not ((in_rank == 0 and d in (-1, 0)) or -in_rank <= d < in_rank): + raise IndexError(f"Dimension out of range (expected to be in range of [{-in_rank}, {in_rank - 1}], but got {d}") + end_dim = end_dim if end_dim >= 0 else in_rank + end_dim + start_dim = start_dim if start_dim >= 0 else in_rank + start_dim + if in_rank == 0: + end_dim = start_dim + if end_dim < start_dim: + raise RuntimeError("flatten() has invalid args: start_dim cannot come after end_dim") + flatten_bit_dim = functools.reduce(operator.mul, in_shape[start_dim:end_dim + 1], 1) + out_shape = in_shape[:start_dim] + (flatten_bit_dim,) + in_shape[end_dim + 1:] + return np.reshape(input, out_shape) + + +def sample_inputs_alias_copy(op_info, device, dtype, requires_grad, **kwargs): + yield SampleInput(make_tensor((S,), dtype=dtype, device=device, requires_grad=requires_grad)) + yield SampleInput(make_tensor((), dtype=dtype, device=device, requires_grad=requires_grad)) + + +# Operator database (sorted alphabetically) +op_db: List[OpInfo] = [ + UnaryUfuncInfo('abs', + aliases=('absolute', ), + ref=np.abs, + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16, torch.chalf), + dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + skips=( + DecorateInfo(unittest.skip("In-place abs not supported for complex tensors"), 'TestBwdGradients', + 'test_inplace_grad', dtypes=(torch.cdouble,)), + DecorateInfo(unittest.skip("In-place abs not supported for complex tensors"), 'TestBwdGradients', + 'test_inplace_gradgrad', dtypes=(torch.cdouble,)), + DecorateInfo(unittest.skip("In-place abs not supported for complex tensors"), 'TestFwdGradients', + 'test_inplace_forward_mode_AD', dtypes=(torch.cdouble,)), + DecorateInfo(unittest.skip("In-place abs not supported for complex tensors"), 
"TestSparseUnaryUfuncs", + "test_inplace", dtypes=(torch.cdouble, torch.cfloat, torch.chalf)), + # Reference: https://github.com/pytorch/pytorch/issues/49224 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', + dtypes=[torch.int8], active_if=TEST_WITH_ASAN), + # TODO: Fix test_out_arg_all_dtypes as torch.empty_like(expected_output) where expected_output=op(input) + # We can break the logic of the loop over all possible types but it is OK. + # https://github.com/pytorch/pytorch/blob/master/test/test_unary_ufuncs.py#L440-L449 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_out_arg_all_dtypes', + dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_meta_inplace', + dtypes=(torch.cdouble, torch.cfloat, torch.chalf)), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_meta_inplace', + dtypes=(torch.cdouble, torch.cfloat, torch.chalf)), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_symbolic_meta_inplace', + dtypes=(torch.cdouble, torch.cfloat, torch.chalf)), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_symbolic_meta_inplace_all_strides', + dtypes=(torch.cdouble, torch.cfloat, torch.chalf)), + ), + supports_fwgrad_bwgrad=True, + assert_autodiffed=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + supports_forward_ad=True), + # NOTE: CPU complex acos produces incorrect outputs (https://github.com/pytorch/pytorch/issues/42952) + UnaryUfuncInfo('acos', + aliases=('arccos', ), + ref=np.arccos, + domain=(-1, 1), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + 
decorators=(precisionOverride({torch.float16: 1e-2, + torch.bfloat16: 1e-1, + torch.complex64: 1e-2}),), + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal', + device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS), + # Failing with wrong imaginary sign on at least some Windows jobs + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + # Failing with wrong imaginary sign on at least some Windows jobs + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_grad', + dtypes=[torch.cdouble], active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_method_grad', + dtypes=[torch.cdouble], active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_inplace_grad', + dtypes=[torch.cdouble], active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_forward_mode_AD', + dtypes=[torch.cdouble], active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_inplace_forward_mode_AD', + dtypes=[torch.cdouble], active_if=IS_WINDOWS),)), + # NOTE: the derivative for inplace acosh is not implemented + UnaryUfuncInfo('acosh', + 
aliases=('arccosh', ), + ref=np.arccosh, + domain=(1, None), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), + decorators=(precisionOverride({torch.bfloat16: 5e-2}),), + supports_inplace_autograd=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal', + device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + # Failing with wrong imaginary sign on at least some Windows jobs + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + ), + # acosh is not defined at x < 1 (real) + reference_numerics_filter=NumericsFilter( + condition=lambda x: (x < 1 if not x.is_complex() else torch.zeros_like(x, dtype=torch.bool)), + safe_val=2)), + BinaryUfuncInfo('add', + # NumPy has no builtin reference for the alpha kwarg, but it is easy enough to emulate + ref=lambda input, other, *, alpha=1: 
np.add(input, other) if alpha == 1 \ + else np.add(input, np.multiply(alpha, other)), + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, + torch.float16, torch.chalf), + assert_autodiffed=True, + sample_inputs_func=sample_inputs_add_sub, + supports_fwgrad_bwgrad=True, + supports_forward_ad=True, + supports_two_python_scalars=True, + decorators=( + DecorateInfo( + toleranceOverride({torch.chalf: tol(atol=1e-2, rtol=0)}), + 'TestBinaryUfuncs', 'test_reference_numerics'), + ), + skips=( + # boolean alpha not handled properly + DecorateInfo(unittest.expectedFailure, + 'TestNNCOpInfo', + 'test_nnc_correctness', + dtypes=(torch.bool,)), + DecorateInfo(unittest.skip("Skipped!"), + 'TestCommon', + 'test_numpy_refs', + dtypes=(torch.complex128,)), + DecorateInfo(unittest.skip("Skipped!"), + 'TestBinaryUfuncs', + 'test_reference_numerics_extremal_values', + dtypes=(torch.complex64, torch.complex128)), + )), + OpInfo('item', + op=lambda inp, *args, **kwargs: wrapper_set_seed(torch.Tensor.item, inp, *args, **kwargs), + ref=np.ndarray.item, + method_variant=None, + dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16, torch.chalf, torch.bool), + supports_out=False, + supports_autograd=False, + error_inputs_func=error_inputs_item, + sample_inputs_func=sample_inputs_item, + skips=( + # Error testing item function variant + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', + dtypes=(torch.float32, torch.complex64)), + # FX failed to normalize op - add the op to the op_skip list. + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # RuntimeError: Composite compliance check failed with the above error. 
+ DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_operator'), + # Booleans mismatch: AssertionError: False is not true + DecorateInfo(unittest.expectedFailure, 'TestFakeTensor', 'test_fake_autocast'), + # Booleans mismatch: AssertionError: False is not true + DecorateInfo(unittest.expectedFailure, 'TestFakeTensor', 'test_fake'), + )), + OpInfo('arange', + dtypes=all_types_and(torch.bfloat16, torch.float16), + supports_out=True, + supports_autograd=False, + is_factory_function=True, + error_inputs_func=error_inputs_arange, + sample_inputs_func=sample_inputs_arange, + skips=( + # https://github.com/pytorch/pytorch/issues/81774 + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + + # Tests that assume input is a tensor or sequence of tensors + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + + # Lazy tensor failures + DecorateInfo(unittest.expectedFailure, 'TestLazyOpInfo', 'test_dispatched_to_lazy'), + DecorateInfo(unittest.skip("Skipped!"), 'TestLazyOpInfo', 'test_correctness'), + DecorateInfo(unittest.skip("Skipped!"), 'TestLazyOpInfo', 'test_correctness_with_reusing_ir'), + + # Exception raised from analyzeImpl at ../torch/csrc/jit/ir/alias_analysis.cpp:608 + # We don't have an op for aten::arange but it isn't a special case. + # Argument types: bool, bool, bool, int, int, Device, boo + DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness'), + + # Captured graph does not contain aten::arange (succeeds on complex!) 
+ # g: graph(): + # %25 : Long(1, strides=[1], requires_grad=0, device=cpu) = prim::Constant[value={1}]() + # return (%25) + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), + + # UserWarning not triggered : Resized a non-empty tensor but did not warn about it. + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + )), + OpInfo('cauchy', + op=lambda inp, *args, **kwargs: wrapper_set_seed(torch.Tensor.cauchy_, inp, *args, **kwargs), + inplace_variant=torch.Tensor.cauchy_, + dtypes=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_autograd=False, + allow_cow_input_materialize_forward=[0], + sample_inputs_func=sample_inputs_cauchy, + error_inputs_func=error_inputs_cauchy, + skips=( + # Tests that assume input tensor has a meaningful effect on output tensor + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + + # AssertionError: Tensor-likes are not close! + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + + # FX failed to normalize op - add the op to the op_skip list. 
+ DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + + # vmap: calling random operator not supported + DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_vmap_exhaustive"), + DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_op_has_batch_rule"), + + DecorateInfo(unittest.skip("make_traced() doesn't set seed properly!"), 'TestCommon', 'test_python_ref_executor'), + + DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_quick'), + )), + OpInfo('exponential', + op=lambda inp, *args, **kwargs: wrapper_set_seed(torch.Tensor.exponential_, inp, *args, **kwargs), + inplace_variant=torch.Tensor.exponential_, + dtypes=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_autograd=False, + allow_cow_input_materialize_forward=[0], + sample_inputs_func=sample_inputs_exponential, + error_inputs_func=error_inputs_exponential, + skips=( + # Tests that assume input tensor has a meaningful effect on output tensor + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + + # AssertionError: Tensor-likes are not close! + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + + # FX failed to normalize op - add the op to the op_skip list. 
+ DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + + # vmap: calling random operator not supported + DecorateInfo(unittest.expectedFailure, "TestVmapOperatorsOpInfo", "test_vmap_exhaustive"), + DecorateInfo(unittest.expectedFailure, "TestVmapOperatorsOpInfo", "test_op_has_batch_rule"), + + DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_quick'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + )), + OpInfo('geometric', + op=lambda inp, *args, **kwargs: wrapper_set_seed(torch.Tensor.geometric_, inp, *args, **kwargs), + inplace_variant=torch.Tensor.geometric_, + dtypes=floating_types_and(torch.float16, torch.bfloat16, torch.int8, torch.int16, torch.int32, torch.int64, torch.uint8), + supports_out=False, + supports_autograd=False, + allow_cow_input_materialize_forward=[0], + sample_inputs_func=sample_inputs_geometric, + error_inputs_func=error_inputs_geometric, + skips=( + # Tests that assume input tensor has a meaningful effect on output tensor + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + + # AssertionError: Tensor-likes are not close! + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + + # FX failed to normalize op - add the op to the op_skip list. 
+ DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + + # vmap: calling random operator not supported + DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_vmap_exhaustive"), + DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_op_has_batch_rule"), + + DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_quick'), + )), + OpInfo('log_normal', + op=lambda inp, *args, **kwargs: wrapper_set_seed(torch.Tensor.log_normal_, inp, *args, **kwargs), + inplace_variant=torch.Tensor.log_normal_, + dtypes=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_autograd=False, + allow_cow_input_materialize_forward=[0], + sample_inputs_func=sample_inputs_log_normal, + error_inputs_func=error_inputs_log_normal, + skips=( + # Tests that assume input tensor has a meaningful effect on output tensor + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + + # AssertionError: Tensor-likes are not close! + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + # FX failed to normalize op - add the op to the op_skip list. 
+ DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + + # vmap: calling random operator not supported + DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_vmap_exhaustive"), + DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_op_has_batch_rule"), + DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_quick'), + )), + OpInfo('normal', + variant_test_name='in_place', + op=lambda inp, *args, **kwargs: wrapper_set_seed(torch.Tensor.normal_, inp, *args, **kwargs), + inplace_variant=torch.Tensor.normal_, + dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_autograd=False, + allow_cow_input_materialize_forward=[0], + sample_inputs_func=sample_inputs_normal, + error_inputs_func=error_inputs_normal, + skips=( + # Tests that assume input is a tensor or sequence of tensors + DecorateInfo(unittest.skip("Test expects tensor input"), "TestCommon", "test_noncontiguous_samples"), + + # Tests that assume input tensor has a meaningful effect on output tensor + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_quick'), + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # AssertionError: Tensor-likes are not close! + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + # FX failed to normalize op - add the op to the op_skip list. 
+ DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # vmap: calling random operator not supported + DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_vmap_exhaustive"), + DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_op_has_batch_rule"), + )), + OpInfo('uniform', + op=lambda inp, *args, **kwargs: wrapper_set_seed(torch.Tensor.uniform_, inp, *args, **kwargs), + method_variant=None, + inplace_variant=torch.Tensor.uniform_, + dtypes=floating_and_complex_types_and(torch.bfloat16, torch.float16), + supports_out=False, + supports_autograd=False, + is_factory_function=False, + allow_cow_input_materialize_forward=[0], + sample_inputs_func=sample_inputs_uniform, + error_inputs_func=error_inputs_uniform, + skips=( + # FX failed to normalize op - add the op to the op_skip list. + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # Tests that assume input tensor has a meaningful effect on output tensor + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # aten.uniform was not decomposed + DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_quick'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + )), + BinaryUfuncInfo('clamp_max', + ref=_clamp_max_numpy, + dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16), + supports_forward_ad=True, + supports_rhs_python_scalar=False, + supports_fwgrad_bwgrad=True, + 
rhs_make_tensor_kwargs=dict(exclude_zero=False), + skips=( + # RuntimeError: "max_elementwise_cuda" not implemented for 'ComplexFloat' + DecorateInfo(unittest.expectedFailure, + 'TestBinaryUfuncs', + 'test_type_promotion', + device_type='cuda'), + # dispatch to lazy test failed + DecorateInfo(unittest.expectedFailure, 'TestLazyOpInfo', 'test_dispatched_to_lazy'), + # test error disabled since rhs non-tensor python scalar is supported + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_errors'), + )), + BinaryUfuncInfo('clamp_min', + ref=_clamp_min_numpy, + dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16), + supports_forward_ad=True, + supports_rhs_python_scalar=False, + supports_fwgrad_bwgrad=True, + rhs_make_tensor_kwargs=dict(exclude_zero=False), + skips=( + # RuntimeError: "min_elementwise_cuda" not implemented for 'ComplexFloat' + DecorateInfo(unittest.expectedFailure, + 'TestBinaryUfuncs', + 'test_type_promotion', + device_type='cuda'), + # dispatch to lazy test failed + DecorateInfo(unittest.expectedFailure, 'TestLazyOpInfo', 'test_dispatched_to_lazy'), + # test error disabled since rhs non-tensor python scalar is supported + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_errors'), + )), + BinaryUfuncInfo('mul', + aliases=('multiply',), + dtypes=all_types_and_complex_and(torch.chalf, torch.float16, torch.bfloat16, torch.bool), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_two_python_scalars=True, + error_inputs_sparse_func=error_inputs_sparse_mul, + sample_inputs_sparse_coo_func=partial(sample_inputs_sparse_mul, layout=torch.sparse_coo), + sample_inputs_sparse_csr_func=partial(sample_inputs_sparse_mul, layout=torch.sparse_csr), + sample_inputs_sparse_csc_func=partial(sample_inputs_sparse_mul, layout=torch.sparse_csc), + sample_inputs_sparse_bsr_func=partial(sample_inputs_sparse_mul, layout=torch.sparse_bsr), + sample_inputs_sparse_bsc_func=partial(sample_inputs_sparse_mul, 
layout=torch.sparse_bsc)), + BinaryUfuncInfo('sub', + # NumPy has no builtin reference for the alpha kwarg, but it is easy enough to emulate + ref=lambda input, other, *, alpha=1: np.subtract(input, np.multiply(alpha, other)), + aliases=('subtract',), + dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16, torch.chalf), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_add_sub, + supports_two_python_scalars=True, + decorators=( + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-2, rtol=0), + torch.bfloat16: tol(atol=1e-5, rtol=5e-3), + torch.complex32: tol(atol=1e-5, rtol=1e-3)}), + 'TestBinaryUfuncs', 'test_reference_numerics'), + DecorateInfo( + toleranceOverride({torch.chalf: tol(atol=1e-2, rtol=0)}), + 'TestCommon', 'test_complex_half_reference_testing', device_type='cpu'), + DecorateInfo( + toleranceOverride({torch.chalf: tol(atol=5e-3, rtol=0)}), + 'TestDecomp', 'test_comprehensive', device_type='cpu'), + DecorateInfo( + toleranceOverride({torch.chalf: tol(atol=5e-3, rtol=0)}), + 'TestDecomp', 'test_quick', device_type='cpu'), + ), + skips=( + DecorateInfo(unittest.skip("Skipped!"), + 'TestBinaryUfuncs', + 'test_reference_numerics', + dtypes=(torch.uint8,)), + DecorateInfo(unittest.skip("Skipped!"), + 'TestBinaryUfuncs', + 'test_reference_numerics_small_values', + dtypes=(torch.uint8,)), + )), + OpInfo('addmm', + # This addmm OpInfo is for when alpha and beta are not both equal to 1. + # alpha=beta=1 is tested in the following opinfo, because that special case will + # trigger addmm being decomposed by a jit pass. 
+ dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + dtypesIfROCM=floating_and_complex_types_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + sample_inputs_func=sample_inputs_addmm, + skips=( + # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 + DecorateInfo( + unittest.skip("Skipped!"), + 'TestSchemaCheckModeOpInfo', + 'test_schema_correctness', + dtypes=(torch.complex64, torch.complex128)), + )), + OpInfo('addmm', + # When alpha=beta=1 as compile-time constants, JIT will decompose addmm into mm and add. + variant_test_name='decomposed', + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + autodiff_nonfusible_nodes=['aten::add', 'aten::mm'], + sample_inputs_func=partial(sample_inputs_addmm, alpha=1, beta=1), + skips=( + # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 + DecorateInfo( + unittest.skip("Skipped!"), + 'TestSchemaCheckModeOpInfo', + 'test_schema_correctness', + dtypes=(torch.complex64, torch.complex128)), + # https://github.com/pytorch/pytorch/issues/71784 + DecorateInfo(unittest.skip('Skipped!'), 'TestNNCOpInfo', 'test_nnc_correctness', + device_type='cpu', dtypes=(torch.float16,)), + )), + OpInfo('addmv', + dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16), + dtypesIfCUDA=floating_types_and(torch.float16, torch.complex64, torch.complex128, + torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=[ + DecorateInfo( + toleranceOverride({torch.half: tol(atol=1e-5, rtol=3e-3)}), + 
'TestInductorOpInfo', 'test_comprehensive', device_type='cpu'), + ], + sample_inputs_func=sample_inputs_addmv), + OpInfo('addbmm', + ref=lambda M, batch1, batch2, beta=1, alpha=1: np.add(np.multiply(np.asarray(beta, dtype=M.dtype), M), + np.multiply(np.asarray(alpha, dtype=batch1.dtype), + np.sum(np.matmul(batch1, batch2), axis=0))), + dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, + *[torch.bfloat16] + if SM53OrLater or TEST_WITH_ROCM else []), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=[ + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1.3e-05, rtol=1.3e-05), + torch.complex64: tol(atol=1e-05, rtol=1.2e-03)}), + 'TestCommon', 'test_numpy_refs'), + # MPS has slightly worse precision. Is this acceptable? + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1.3e-04, rtol=1.3e-04), + torch.complex64: tol(atol=1e-05, rtol=1.2e-03)}), + 'TestCommon', 'test_numpy_ref_mps'), + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1e-5, rtol=1e-5)}), + 'TestConsistency', + 'test_output_match', + ), + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1.5e-05, rtol=1e-05)}), + 'TestCommon', 'test_out'), + DecorateInfo( + toleranceOverride({torch.half: tol(atol=6e-3, rtol=1e-2)}), + 'TestInductorOpInfo', 'test_comprehensive', device_type='cpu'), + ], + skips=( + # NVIDIA only assures that bfloat16 is supported by bmm if SM >= 5.3 + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes', device_type='cuda', active_if=not SM53OrLater), + # addbmm does not correctly warn when resizing out= inputs + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + # https://github.com/pytorch/pytorch/issues/55907 + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'), + ), + 
sample_inputs_func=sample_inputs_addbmm), + OpInfo('baddbmm', + dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16), + dtypesIfCUDA=floating_types_and(torch.float16, torch.complex64, torch.complex128, + torch.bfloat16), + backward_dtypesIfCUDA=floating_types_and(torch.float16, + *[torch.bfloat16] if SM53OrLater or TEST_WITH_ROCM else [], + torch.complex64, torch.complex128), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=[ + DecorateInfo( + toleranceOverride({torch.complex64: tol(atol=1e-05, rtol=1.2e-03)}), + 'TestCommon', 'test_variant_consistency_eager', device_type='cuda'), + DecorateInfo( + toleranceOverride({torch.complex64: tol(atol=1e-05, rtol=1.2e-03)}), + 'TestMathBits', 'test_conj_view', device_type='cuda'), + ], + sample_inputs_func=sample_inputs_baddbmm, + skips=( + # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 + DecorateInfo( + unittest.skip("Skipped!"), + 'TestSchemaCheckModeOpInfo', + 'test_schema_correctness', + dtypes=(torch.complex64, torch.complex128)), + )), + OpInfo('dot', + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16), + assert_autodiffed=True, + sample_inputs_func=sample_inputs_dot_vdot, + error_inputs_func=error_inputs_dot_vdot, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 + DecorateInfo( + unittest.skip("Skipped!"), + 'TestSchemaCheckModeOpInfo', + 'test_schema_correctness', + dtypes=(torch.complex64, torch.complex128)), + )), + OpInfo('vdot', + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_dot_vdot, + 
error_inputs_func=error_inputs_dot_vdot, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 + DecorateInfo( + unittest.skip("Skipped!"), + 'TestSchemaCheckModeOpInfo', + 'test_schema_correctness', + dtypes=(torch.complex64, torch.complex128)), + )), + OpInfo('bmm', + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, + *[torch.bfloat16] + if SM53OrLater or TEST_WITH_ROCM else []), + assert_autodiffed=True, + assert_jit_shape_analysis=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # NVIDIA only assures that bfloat16 is supported by bmm if SM >= 5.3 + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes', device_type='cuda', active_if=not SM53OrLater), + DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-5, rtol=1e-5)}), + "TestCommon", "test_out") + ), + sample_inputs_func=sample_inputs_bmm), + OpInfo('mv', + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_mv), + OpInfo('addr', + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), + # Reference: https://github.com/pytorch/pytorch/issues/50747 + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/50747 + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager', + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16)), + ), + sample_inputs_func=sample_inputs_addr, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL), + OpInfo('addcmul', + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + assert_autodiffed=True, + 
supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # TODO: update sample inputs with for_inplace_variant kwarg to support this test + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + ), + sample_inputs_func=sample_inputs_addcmul_addcdiv, + reference_inputs_func=partial( + reference_inputs_elementwise_ternary, sample_inputs_func=reference_inputs_addcmul_addcdiv)), + OpInfo('addcdiv', + dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # TODO: update sample inputs with for_inplace_variant kwarg to support this test + DecorateInfo(unittest.expectedFailure, + 'TestCommon', + 'test_variant_consistency_eager'), + ), + sample_inputs_func=sample_inputs_addcmul_addcdiv, + reference_inputs_func=partial( + reference_inputs_elementwise_ternary, sample_inputs_func=reference_inputs_addcmul_addcdiv)), + UnaryUfuncInfo('asin', + aliases=('arcsin', ), + ref=np.arcsin, + domain=(-1, 1), + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), + assert_autodiffed=True, + decorators=[ + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-05, rtol=1e-03)}), + 'TestUnaryUfuncs', device_type='cuda' + ), + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=8e-5, rtol=4e-5)}), + 'TestInductorOpInfo', 'test_comprehensive', device_type='cuda' + ), + precisionOverride({torch.bfloat16: 1e-2}), + ], + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + 
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped! sparse backward not supported"), + 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), + )), + # NOTE: derivative for inplace asinh is not implemented + UnaryUfuncInfo('asinh', + aliases=('arcsinh', ), + ref=np.arcsinh, + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), + decorators=(precisionOverride({torch.bfloat16: 5e-2}),), + supports_inplace_autograd=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + promotes_int_to_float=True, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cuda', 
dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped! sparse backward not supported"), + 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), + )), + UnaryUfuncInfo('atan', + aliases=('arctan', ), + ref=np.arctan, + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + promotes_int_to_float=True, + decorators=(precisionOverride({torch.bfloat16: 1e-2}),), + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cuda', dtypes=[torch.cfloat, torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cuda', dtypes=[torch.cfloat, torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped! 
sparse backward not supported"), + 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), + )), + BinaryUfuncInfo('atan2', + aliases=('arctan2',), + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.half), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + supports_rhs_python_scalar=False, + skips=( + # Incorrectly attempts to use a scalar for the second argument + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping'), + )), + UnaryUfuncInfo('atanh', + aliases=('arctanh', ), + ref=np.arctanh, + domain=(-1, 1), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), + decorators=[ + precisionOverride({torch.bfloat16: 1e-2}), + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=9e-3, rtol=8e-5)}), + "TestInductorOpInfo", + "test_comprehensive", + device_type="cuda" + ), + ], + supports_inplace_autograd=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + promotes_int_to_float=True, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cuda', dtypes=[torch.cfloat, torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + 
device_type='cuda', dtypes=[torch.cfloat], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped! sparse backward not supported"), + 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), + )), + OpInfo('allclose', + dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), + ref=np.allclose, + supports_autograd=False, + supports_forward_ad=False, + sample_inputs_func=sample_inputs_allclose, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'), + ), + supports_out=False), + OpInfo('broadcast_to', + ref=np.broadcast_to, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_broadcast_to), + OpInfo('broadcast_shapes', + op=torch.broadcast_shapes, + ref=np.broadcast_shapes if np.lib.NumpyVersion(np.__version__) >= '1.20.0' else None, + dtypes=_dispatch_dtypes((torch.float32,)), + supports_out=False, + supports_gradgrad=False, + assert_autodiffed=False, + supports_autograd=False, + supports_scripting=False, + sample_inputs_func=sample_inputs_broadcast_shapes, + skips=( + # https://github.com/pytorch/pytorch/issues/64997 + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # skip dtype tests since broadcast_shape is not device dependent. 
+ # having dtypes limited to torch.float32 would cause test_dtypes to report unexpected success + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_dtypes'), + # skip these tests since we have non tensor input + DecorateInfo(unittest.skip('Skipped!'), "TestCommon", "test_noncontiguous_samples"), + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.skip('Skipped!'), 'TestJit', 'test_variant_consistency_jit'), + )), + OpInfo('broadcast_tensors', + ref=np.broadcast_arrays, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_broadcast_tensors, + reference_inputs_func=reference_inputs_broadcast_tensors, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + skips=( + # https://github.com/pytorch/pytorch/issues/64997 + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # JIT does not support variadic tensors. + # RuntimeError: input->type()->kind() == TypeKind::OptionalType + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":252, + # please report a bug to PyTorch. + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.float32]), + )), + OpInfo('block_diag', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # Default batching rule in core doesn't work for ops with TensorList args + check_batched_forward_grad=False, + skips=( + # https://github.com/pytorch/pytorch/issues/64997 + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # JIT does not support variadic tensors. 
+ # RuntimeError: input->type()->kind() == TypeKind::OptionalType + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":252, + # please report a bug to PyTorch. + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.float32]), + ), + sample_inputs_func=sample_inputs_block_diag), + UnaryUfuncInfo('bitwise_not', + ref=np.bitwise_not, + dtypes=integral_types_and(torch.bool), + operator_variant=operator.invert, + supports_autograd=False), + BinaryUfuncInfo('bitwise_left_shift', + op=torch.bitwise_left_shift, + dtypes=integral_types(), + dtypesIfCUDA=integral_types(), + operator_variant=operator.lshift, + inplace_operator_variant=operator.ilshift, + supports_autograd=False, + supports_one_python_scalar=True, + rhs_make_tensor_kwargs=dict(low=0), + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_type_promotion'), + # https://github.com/pytorch/pytorch/issues/70904 + DecorateInfo(unittest.skip("Some inputs produce undefined outputs"), 'TestCommon', 'test_compare_cpu'), + )), + BinaryUfuncInfo('bitwise_right_shift', + op=torch.bitwise_right_shift, + dtypes=integral_types(), + dtypesIfCUDA=integral_types(), + operator_variant=operator.rshift, + inplace_operator_variant=operator.irshift, + supports_autograd=False, + supports_one_python_scalar=True, + rhs_make_tensor_kwargs=dict(low=0), + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_type_promotion'), + # https://github.com/pytorch/pytorch/issues/70904 + DecorateInfo(unittest.skip("Some inputs produce undefined outputs"), 'TestCommon', 'test_compare_cpu'), + )), + OpInfo('combinations', + op=torch.combinations, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + supports_out=False, + 
sample_inputs_func=sample_inputs_combinations), + OpInfo('cartesian_prod', + op=torch.cartesian_prod, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_cartesian_prod, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # RuntimeError: input->type()->kind() == TypeKind::OptionalType + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270 + DecorateInfo(unittest.expectedFailure, + 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), + )), + OpInfo('cdist', + dtypes=floating_types(), + supports_out=False, + supports_gradgrad=False, + assert_autodiffed=False, + sample_inputs_func=sample_inputs_cdist), + UnaryUfuncInfo('ceil', + ref=np.ceil, + dtypes=all_types_and(torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + DecorateInfo(unittest.expectedFailure, + 'TestNNCOpInfo', + 'test_nnc_correctness', + dtypes=tuple(t for t in integral_types() if t != torch.uint8)), + ), + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + assert_autodiffed=True), + OpInfo('cholesky', + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_linalg_cholesky, + gradcheck_wrapper=gradcheck_wrapper_hermitian_input, + decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],), + OpInfo('cholesky_inverse', + dtypes=floating_and_complex_types(), + backward_dtypes=floating_and_complex_types(), + # https://github.com/pytorch/pytorch/issues/80411 + 
gradcheck_fast_mode=True, + supports_fwgrad_bwgrad=True, + supports_forward_ad=True, + check_batched_gradgrad=True, + sample_inputs_func=sample_inputs_linalg_cholesky_inverse, + gradcheck_wrapper=gradcheck_wrapper_triangular_input_real_positive_diagonal, + decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack], + skips=( + # Strides are not the same! Original strides were ((4, 2, 1),) and strides are now ((4, 1, 2),) + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),)), + OpInfo('cholesky_solve', + op=torch.cholesky_solve, + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_cholesky_solve, + check_batched_gradgrad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + gradcheck_wrapper=lambda *args, **kwargs: gradcheck_wrapper_triangular_input(*args, idx=1, **kwargs), + decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack]), + OpInfo('chunk', + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), + sample_inputs_func=sample_inputs_chunk, + reference_inputs_func=reference_inputs_chunk, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False), + OpInfo('unsafe_chunk', + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), + sample_inputs_func=sample_inputs_chunk, + check_batched_forward_grad=False, + reference_inputs_func=reference_inputs_chunk, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False), + OpInfo('clone', + ref=np.copy, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), + sample_inputs_func=sample_inputs_clone_contiguous, + reference_inputs_func=reference_inputs_clone_contiguous, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + skips=( + # TypeError: _copy_dispatcher() got an unexpected keyword argument 'memory_format' + # (NumPy reference needs to be extended with memory_format) + 
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_numpy_ref'), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_numpy_ref_mps'), + ),), + OpInfo('contiguous', + op=lambda x, *args, **kwargs: x.contiguous(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), + sample_inputs_func=sample_inputs_clone_contiguous, + reference_inputs_func=reference_inputs_clone_contiguous, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + autodiff_fusible_nodes=['aten::contiguous'], + assert_jit_shape_analysis=True, + supports_out=False, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + )), + OpInfo('sum_to_size', + op=lambda x, *args, **kwargs: x.sum_to_size(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_sum_to_size, + error_inputs_func=error_inputs_sum_to_size, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + skips=( + # lambda impl + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float,)), + )), + OpInfo('clamp', + aliases=('clip',), + ref=_clamp_numpy, + dtypes=all_types_and(torch.bfloat16, torch.half), + sample_inputs_func=sample_inputs_clamp, + reference_inputs_func=partial(reference_inputs_elementwise_ternary, sample_inputs_func=sample_inputs_clamp), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # NNC appear to not handle boolean clamp + DecorateInfo(unittest.expectedFailure, + 'TestNNCOpInfo', + 'test_nnc_correctness', + dtypes=(torch.bool,)), + # MPS does not support float64, while numpy does internal computations in float64. 
+ # See https://github.com/pytorch/pytorch/blob/3c1cf03fde145bdbe1f5ffb81765d076c10b4c04/test/test_ops.py#L260-L264 + DecorateInfo(unittest.expectedFailure, + 'TestCommon', + 'test_numpy_ref_mps'), + )), + UnaryUfuncInfo('positive', + ref=np.positive, + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + ), + UnaryUfuncInfo('conj', + ref=np.conj, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, + torch.half, torch.chalf), + supports_sparse=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + supports_out=False), + UnaryUfuncInfo('conj_physical', + decomp_aten_name='_conj_physical', + ref=np.conj, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, + torch.half, torch.chalf), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + skips=( + # RuntimeError: inputSet && outputSet + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":118, + # please report a bug to PyTorch. + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32, )), + DecorateInfo(unittest.skip("Skipped! 
conj_physical_ not implemented for sparse"), + 'TestSparseUnaryUfuncs', 'test_inplace'), + )), + OpInfo('resolve_conj', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_view_as_real, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + ), + OpInfo('resolve_neg', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + sample_inputs_func=sample_inputs_view_as_real, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + ), + OpInfo('view_as_real', + dtypes=complex_types(), + supports_forward_ad=True, + supports_out=False, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_view_as_real, + test_conjugated_samples=False, + ), + OpInfo('view_as_complex', + dtypes=floating_types_and(torch.half), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + test_neg_view=False, + sample_inputs_func=sample_inputs_view_as_complex, + skips=( + # RuntimeError: Tensor must have a last dimension with stride 1 + DecorateInfo(unittest.expectedFailure, "TestCommon", "test_noncontiguous_samples"), + # RuntimeError: "eq_cpu" not implemented for 'ComplexHalf' + DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness', dtypes=(torch.half,)), + # RuntimeError: view size is not compatible with input tensor's size and stride + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides"), + )), + BinaryUfuncInfo('complex', + dtypes=floating_types_and(torch.half), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_rhs_python_scalar=False, + error_inputs_func=error_inputs_complex, + skips=( + # Tests don't account for complex's type promotion semantics + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out', device_type='mps'), 
+ DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_binary_ufuncs_mixed_dtype'),)), + BinaryUfuncInfo('copysign', + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + promotes_int_to_float=True, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True), + OpInfo('corrcoef', + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_corrcoef, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + skips=( + # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 + DecorateInfo( + unittest.skip("Skipped!"), + 'TestSchemaCheckModeOpInfo', + 'test_schema_correctness', + dtypes=(torch.complex64, torch.complex128)), + ), + supports_out=False), + UnaryUfuncInfo('cos', + ref=np.cos, + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), + assert_autodiffed=True, + handles_large_floats=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + decorators=(precisionOverride({torch.bfloat16: 1e-2}),), + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + dtypes=(torch.cfloat, torch.cdouble,), device_type='cpu', active_if=IS_WINDOWS), + # This fails on CUDA but passes on ROCm + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + dtypes=(torch.cdouble,), device_type='cuda'), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cpu', + 
dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS), + # AssertionError: Tensor-likes are not close! + # Greatest absolute difference: nan at index (700,) (up to 1e-05 allowed) + # Greatest relative difference: nan at index (700,) (up to 0.001 allowed) + DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cuda', + dtypes=(torch.chalf,), active_if=IS_WINDOWS), + )), + UnaryUfuncInfo('cosh', + ref=np_unary_ufunc_integer_promotion_wrapper(np.cosh), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/48641 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.int8]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + dtypes=[torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cpu', + dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cpu', + dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS), + # AssertionError: Tensor-likes are not close! 
+ # Greatest absolute difference: nan at index (6000,) (up to 1e-05 allowed) + # Greatest relative difference: nan at index (6000,) (up to 0.001 allowed) + DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cuda', + dtypes=(torch.chalf,), active_if=IS_WINDOWS), + )), + OpInfo('cov', + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_cov, + error_inputs_func=error_inputs_cov, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 + DecorateInfo( + unittest.skip("Skipped!"), + 'TestSchemaCheckModeOpInfo', + 'test_schema_correctness', + dtypes=(torch.complex64, torch.complex128)), + # Float did not match double + DecorateInfo(unittest.expectedFailure, 'TestBwdGradients', 'test_fn_grad'), + # Jacobian mismatch + DecorateInfo(unittest.expectedFailure, 'TestBwdGradients', 'test_fn_gradgrad'), + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_forward_mode_AD'), + DecorateInfo(unittest.skip("Barely fails"), 'TestFwdGradients', 'test_fn_fwgrad_bwgrad'), + # JIT test not working for tensor kwargs (https://github.com/pytorch/pytorch/issues/58507) + # RuntimeError: + # undefined value tensor: + # File "", line 3 + # def the_method(i0): + # return torch.cov(i0, correction=0, fweights=None, aweights=tensor([0.0518, 0.4681], dtype=torch.float32, requires_grad=True)) # noqa: B950 + # ~~~~~~ <--- HERE + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(toleranceOverride({torch.float16: tol(atol=8e-3, rtol=1.4e-3)}), + "TestInductorOpInfo", "test_comprehensive", device_type="cpu"), + )), + OpInfo('cross', + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_cross, + supports_fwgrad_bwgrad=True, + supports_out=True, + supports_forward_ad=True), + 
OpInfo('cumsum', + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # cumsum does not handle correctly out= dtypes + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + ), + sample_inputs_func=sample_inputs_cumulative_ops), + OpInfo('cumprod', + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # cumprod does not handle correctly out= dtypes + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + ), + # gradgradcheck fails in fast_mode=True: #56275 + sample_inputs_func=sample_inputs_cumprod, + gradcheck_fast_mode=False), + OpInfo('cummax', + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + sample_inputs_func=partial(sample_inputs_cumulative_ops, supports_dtype_kwargs=False), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + ), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL), + OpInfo('cummin', + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + sample_inputs_func=partial(sample_inputs_cumulative_ops, supports_dtype_kwargs=False), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + ), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL), + UnaryUfuncInfo('deg2rad', + ref=np.radians, + decorators=(precisionOverride({torch.bfloat16: 7e-1, + torch.float16: 7e-1}),), + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + promotes_int_to_float=True), + OpInfo('diff', + op=torch.diff, + # np.diff has np._NoValue as default values for prepend and append, compare_with_reference breaks if prepend/append + # are set as None when converting to numpy + ref=lambda input, n=1, dim=-1, prepend=np._NoValue, append=np._NoValue: 
( + np.diff(input, n, dim, np._NoValue if prepend is None else prepend, np._NoValue if append is None else append) + ), + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_diff, + error_inputs_func=error_inputs_diff, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + skips=( + )), + BinaryUfuncInfo('div', + aliases=('divide',), + variant_test_name='no_rounding_mode', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + promotes_int_to_float=True, + supports_fwgrad_bwgrad=True, + supports_two_python_scalars=True, + assert_autodiffed=True, + rhs_make_tensor_kwargs=dict(exclude_zero=True),), + BinaryUfuncInfo('div', + aliases=('divide',), + variant_test_name='trunc_rounding', + dtypes=all_types_and(torch.half, torch.bfloat16), + sample_inputs_func=partial(sample_inputs_elementwise_binary, sample_kwargs=dict(rounding_mode="trunc")), + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_two_python_scalars=True, + assert_autodiffed=True, + rhs_make_tensor_kwargs=dict(exclude_zero=True), + decorators=( + # See https://github.com/pytorch/pytorch/issues/111126 + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'), + ), + skips=( + # RuntimeError: MALFORMED INPUT: Unhandled node kind (in computeValue): aten::div + DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_working'), + # FIXME: + # torch.autograd.gradcheck.GradcheckError: 
Jacobian mismatch for + # output 0 with respect to input 1, + # numerical:tensor(-17746.9307, dtype=torch.float64) + # analytical:tensor(0., dtype=torch.float64) + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', + 'test_fn_grad', device_type='cpu', + dtypes=(torch.float64,)), + )), + BinaryUfuncInfo('div', + aliases=('divide',), + variant_test_name='floor_rounding', + dtypes=all_types_and(torch.half, torch.bfloat16), + sample_inputs_func=partial(sample_inputs_elementwise_binary, sample_kwargs=dict(rounding_mode="floor")), + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_two_python_scalars=True, + assert_autodiffed=True, + rhs_make_tensor_kwargs=dict(exclude_zero=True), + decorators=( + # See https://github.com/pytorch/pytorch/issues/111126 + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'), + ), + skips=( + # RuntimeError: MALFORMED INPUT: Unhandled node kind (in computeValue): aten::div + DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_working'), + # FIXME: + # torch.autograd.gradcheck.GradcheckError: Jacobian mismatch for + # output 0 with respect to input 1, + # numerical:tensor(-17746.9307, dtype=torch.float64) + # analytical:tensor(0., dtype=torch.float64) + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', + 'test_fn_grad', + dtypes=(torch.float64,), + device_type='cpu'), + )), + BinaryUfuncInfo('true_divide', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_forward_ad=True, + promotes_int_to_float=True, + supports_fwgrad_bwgrad=True, + supports_two_python_scalars=True, + rhs_make_tensor_kwargs=dict(exclude_zero=True)), + OpInfo('equal', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + ref=lambda input, other: (input == 
other).all(), + sample_inputs_func=sample_inputs_equal, + supports_autograd=False, + supports_tracing=False, + skips=( + )), + UnaryUfuncInfo('exp', + ref=np_unary_ufunc_integer_promotion_wrapper(np.exp), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/48010 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + ), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True), + OpInfo('expand', + op=lambda self, shape: self.expand(shape), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_expand, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_jit_shape_analysis=True, + supports_out=False, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + )), + OpInfo('expand_as', + op=lambda self, other: self.expand_as(other), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_expand_as, + supports_out=False, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),), + ), + OpInfo('expand_copy', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_expand, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_jit_shape_analysis=True, + supports_out=True, + skips=( + 
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), + )), + OpInfo('diag', + ref=np.diag, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), + dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_diag, + error_inputs_func=error_inputs_diag), + OpInfo('diag_embed', + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), + supports_out=False, + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_diagonal_diag_embed, + reference_inputs_func=reference_inputs_diagonal_diag_embed, + error_inputs_func=error_inputs_diagonal_diag_embed), + OpInfo('diagonal', + aten_backward_name='diagonal_backward', + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_diagonal_diag_embed, + reference_inputs_func=reference_inputs_diagonal_diag_embed, + error_inputs_func=error_inputs_diagonal_diag_embed), + OpInfo('diagonal_copy', + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_diagonal_diag_embed, + reference_inputs_func=reference_inputs_diagonal_diag_embed, + error_inputs_func=error_inputs_diagonal_diag_embed), + OpInfo('diagonal_scatter', + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_diagonal_scatter), + OpInfo('alias_copy', + 
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), + sample_inputs_func=sample_inputs_alias_copy, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=True), + BinaryUfuncInfo('eq', + ref=np.equal, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), + always_returns_bool=True, + supports_autograd=False, + sample_inputs_func=sample_inputs_comparison_ops, + skips=( + )), + BinaryUfuncInfo('fmax', + op=torch.fmax, + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_rhs_python_scalar=False, + skips=( + # RuntimeError: "max_elementwise_cuda" not implemented for 'ComplexFloat' + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_type_promotion'), + )), + BinaryUfuncInfo('fmin', + op=torch.fmin, + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_rhs_python_scalar=False, + skips=( + # RuntimeError: "min_elementwise_cuda" not implemented for 'ComplexFloat' + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_type_promotion'), + )), + BinaryUfuncInfo('fmod', + ref=np.fmod, + dtypes=all_types_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16), + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=None, + rhs_make_tensor_kwargs={'exclude_zero': True}, + decorators=( + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_contig_vs_every_other', + dtypes=(torch.bfloat16,)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_non_contig', + dtypes=(torch.bfloat16,)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_reference_numerics', + dtypes=(torch.bfloat16,)), + 
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_reference_numerics_small_values', + dtypes=(torch.uint8,)), + # FIXME: + # torch.autograd.gradcheck.GradcheckError: Jacobian mismatch for + # output 0 with respect to input 1, + # numerical:tensor(101.6283, dtype=torch.float64) + # analytical:tensor(-18.3575, dtype=torch.float64) + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', + 'test_fn_grad', + dtypes=(torch.float64,), + device_type='cpu'), + )), + BinaryUfuncInfo('remainder', + ref=np.remainder, + dtypes=all_types_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16), + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=None, + operator_variant=operator.mod, + inplace_operator_variant=operator.imod, + supports_one_python_scalar=True, + rhs_make_tensor_kwargs={'exclude_zero': True}, + decorators=( + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_contig_vs_every_other', + dtypes=(torch.bfloat16,)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_non_contig', + dtypes=(torch.bfloat16,)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_reference_numerics', + dtypes=(torch.bfloat16,)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_reference_numerics_small_values', + dtypes=(torch.uint8,)), + DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', + 'test_nnc_correctness', + dtypes=(torch.bfloat16,)), + # Fails on XLA + # False is not true : Tensors failed to compare as equal! 
+ # Attempted to compare equality of tensors with different dtypes + DecorateInfo(unittest.skip("Skipped!"), 'TestOpInfo', device_type='xla', dtypes=(torch.long,)), + # FIXME: + # torch.autograd.gradcheck.GradcheckError: Jacobian mismatch for + # output 0 with respect to input 1, + # numerical:tensor(102.4676, dtype=torch.float64) + # analytical:tensor(-17.5182, dtype=torch.float64) + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', + 'test_fn_grad', device_type='cpu', + dtypes=(torch.float64,)), + DecorateInfo( + toleranceOverride({ + torch.float16: tol(atol=5e-4, rtol=3e-3), + }), + "TestInductorOpInfo", + "test_comprehensive", + device_type="cuda" + ), + )), + UnaryUfuncInfo('frac', + ref=lambda x: np.modf(x)[0], + dtypes=floating_types_and(torch.bfloat16, torch.float16), + dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + dtypes=(torch.bfloat16, torch.float16, torch.float32, torch.float64)), + # 76047 + DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness', + dtypes=(torch.bfloat16, torch.float32, torch.float64)), + )), + OpInfo('stft', + decorators=[ + skipCPUIfNoFFT, + DecorateInfo(unittest.skip("Skipped! 
stft does not match the native function"), + 'TestJit', 'test_variant_consistency_jit'), + ], + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_stft, + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_forward_grad=False, + check_batched_grad=False, + check_batched_gradgrad=False, + supports_out=False, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + ), + OpInfo('istft', + dtypes=complex_types(), + sample_inputs_func=sample_inputs_istft, + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_forward_grad=False, + check_batched_grad=False, + check_batched_gradgrad=False, + supports_out=False, + decorators=( + DecorateInfo(unittest.skip("Skipped! istft does not match the native function"), + 'TestJit', 'test_variant_consistency_jit'), + ), + skips=( + skipCPUIfNoFFT, + # gradcheck fails on ROCm (gh-68429) + # grad is computed improperly (probably for weights tensor) + DecorateInfo(unittest.expectedFailure, 'TestBwdGradients', 'test_fn_grad'), + # Pre-existing condition (calls .item); needs to be fixed + DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward'), + )), + UnaryUfuncInfo('floor', + ref=np.floor, + dtypes=all_types_and(torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + DecorateInfo(unittest.expectedFailure, + 'TestNNCOpInfo', + 'test_nnc_correctness', + dtypes=tuple(t for t in integral_types() if t != torch.uint8)), + ), + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + assert_autodiffed=True), + OpInfo('flip', + op=torch.flip, + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + 
sample_inputs_func=sample_inputs_flip, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False), + OpInfo('fliplr', + op=torch.fliplr, + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_fliplr_flipud, + error_inputs_func=error_inputs_fliplr, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False), + OpInfo('flipud', + op=torch.flipud, + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_fliplr_flipud, + error_inputs_func=error_inputs_flipud, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False), + OpInfo('sparse.sampled_addmm', + dtypes=floating_and_complex_types(), + supports_autograd=True, + sample_inputs_func=sample_inputs_sparse_sampled_addmm, + decorators=[ + skipCUDAIf(not ((_get_torch_cuda_version() >= (11, 3)) + or (_get_torch_rocm_version() >= (5, 2))), + "cusparseSDDMM was added in 11.2.1"), + skipCPUIfNoMklSparse, ], + skips=( + # NotImplementedError: Tensors of type SparseCsrTensorImpl do not have is_contiguous + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples'), + # RuntimeError: Sparse CSR tensors do not have strides. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out'), + DecorateInfo(unittest.skip("Skipped!"), 'TestTags', 'test_tags'), + # RuntimeError: sampled_addmm: Expected result to have sparse csr layout, but got Strided + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out_warning'), + # RuntimeError: Sparse CSR tensors do not have strides + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'), + # RuntimeError: Sparse CSR tensors do not have strides + DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_operator'), + # RuntimeError: Sparse CSR tensors do not have strides + DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_backward'), + # RuntimeError: Sparse CSR tensors do not have strides + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'), + # RuntimeError: Sparse CSR tensors do not have strides + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'), + # RuntimeError: Sparse CSR tensors do not have strides + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'), + # RuntimeError: Sparse CSR tensors do not have strides + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # RuntimeError: unsupported memory format option Preserve + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # RuntimeError: sparse_mask does not support automatic differentiation for outputs with complex dtype + # RuntimeError: Sparse CSR tensors do not have strides + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_fn_fwgrad_bwgrad'), + # ValueError: Sparse output is not supported at gradcheck yet. Please call to_dense(masked_grad=...) ... + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_grad'), + # RuntimeError: sparse_mask does not support automatic differentiation for outputs with complex dtype. 
+ # RuntimeError: Sparse CSR tensors do not have is_contiguous + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_gradgrad'), + # ValueError: Sparse output is not supported at gradcheck yet. Please call to_dense(masked_grad=...) ... + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_forward_mode_AD'), + # NotImplementedError: Could not run 'aten::sparse_sampled_addmm' with arguments from the 'SparseCsrMeta' backend. + DecorateInfo(unittest.skip("Skipped!"), 'TestMeta', 'test_dispatch_meta_outplace'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMeta', 'test_dispatch_symbolic_meta_outplace'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMeta', 'test_meta_outplace'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMeta', 'test_dispatch_symbolic_meta_outplace_all_strides'), + DecorateInfo(unittest.skip("Skipped!"), 'TestFakeTensor', 'test_fake_crossref_backward_no_amp'), + )), + OpInfo('sparse.mm', + dtypes=floating_types_and(torch.bfloat16, torch.float16), + variant_test_name='reduce', + supports_autograd=True, + supports_out=False, + supports_gradgrad=False, + supports_forward_ad=False, + sample_inputs_func=sample_inputs_sparse_mm_reduce, + decorators=[onlyCPU], + skips=( + # NotImplementedError: Tensors of type SparseCsrTensorImpl do not have is_contiguous + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples'), + # RuntimeError: Sparse CSR tensors do not have strides. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestTags', 'test_tags'), + # RuntimeError: Sparse CSR tensors do not have strides + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'), + # RuntimeError: Sparse CSR tensors do not have strides + DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_operator'), + # RuntimeError: Sparse CSR tensors do not have strides + DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_backward'), + # RuntimeError: Sparse CSR tensors do not have strides + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'), + # RuntimeError: Sparse CSR tensors do not have strides + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'), + # RuntimeError: Sparse CSR tensors do not have strides + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'), + # RuntimeError: Sparse CSR tensors do not have strides + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # RuntimeError: unsupported memory format option Preserve + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # ValueError: Sparse output is not supported at gradcheck yet. Please call to_dense(masked_grad=...) ... + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_fn_fwgrad_bwgrad'), + # RuntimeError: Sparse CSR tensors do not have is_contiguou + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_grad'), + # ValueError: Sparse output is not supported at gradcheck yet. Please call to_dense(masked_grad=...) ... + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_gradgrad'), + # RuntimeError: Sparse CSR tensors do not have strides + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_forward_mode_AD'), + # ValueError: Sparse output is not supported at gradcheck yet. Please call to_dense(masked_grad=...) ... 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_fail_gradgrad'), + # NotImplementedError: Could not run 'aten::_sparse_mm_reduce_impl' with arguments from the 'SparseCsrMeta' backend + DecorateInfo(unittest.skip("Skipped!"), 'TestMeta', 'test_dispatch_meta_outplace'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMeta', 'test_dispatch_symbolic_meta_outplace'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMeta', 'test_meta_outplace'), + )), + UnaryUfuncInfo('i0', + ref=np_unary_ufunc_integer_promotion_wrapper( + scipy.special.i0) if TEST_SCIPY else None, + aliases=('special.i0',), + decorators=(precisionOverride({torch.bfloat16: 3e-1, + torch.float16: 5e-1}),), + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + backward_dtypes=floating_types(), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + sample_inputs_func=sample_inputs_i0_i1, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + dtypes=(torch.int8,)), + )), + BinaryUfuncInfo('floor_divide', + ref=_floor_divide_np, + dtypes=all_types_and(torch.half, torch.bfloat16), + supports_autograd=False, + rhs_make_tensor_kwargs=dict(exclude_zero=True), + supports_two_python_scalars=True, + skips=( + # AssertionError: Results of original model and exported/imported version of model differed + DecorateInfo(unittest.skip('Skipped!'), 'TestJit', 'test_variant_consistency_jit'), + # bfloat16 floor_divide compared with a float32 reference works inconsistently + DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs', + dtypes=(torch.bfloat16,)), + # int8 floor divide has different results for -128 // -1 vs. 
NumPy + DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs', 'test_reference_numerics_small_values', + dtypes=(torch.int8,)), + # The following tests fails on some jobs + DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs', 'test_reference_numerics_extremal_values', + dtypes=(torch.float16,)), + DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-3, rtol=5e-3)}), + 'TestBinaryUfuncs', 'test_reference_numerics'), + )), + UnaryUfuncInfo('frexp', + op=torch.frexp, + ref=np.frexp, + dtypes=floating_types_and(torch.half, torch.bfloat16), + decorators=[], + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # skips below tests as torch.frexp returns tuple-like (mantissa, exponent) as outputs, + # while theses tests currently requires output to a single tensor. + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_batch_vs_slicing'), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_contig_vs_every_other'), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_contig_vs_transposed'), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_non_contig_expand'), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_variant_consistency'), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_out_arg_all_dtypes'), + + # skips test_reference_numerics due to error in Windows CI. 
+ # The np.frexp returns exponent as np.intc dtype on Windows platform, + # and np.intc does not have the correspond torch dtype + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + active_if=IS_WINDOWS), + )), + UnaryUfuncInfo('log1p', + ref=np.log1p, + aliases=('special.log1p',), + domain=(-1, None), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + decorators=(precisionOverride({torch.bfloat16: 1e-1}),), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + assert_autodiffed=True, + promotes_int_to_float=True), + BinaryUfuncInfo('ge', + ref=np.greater_equal, + aliases=('greater_equal',), + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16), + always_returns_bool=True, + supports_autograd=False, + skips=( + )), + OpInfo('geqrf', + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_linalg_qr_geqrf, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], + supports_autograd=False, + skips=( + # FIXME: geqrf can't forward with complex inputs that require grad + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_dtypes'), + # Strides are not the same! 
+ DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + )), + BinaryUfuncInfo('gt', + ref=np.greater, + aliases=('greater',), + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16), + always_returns_bool=True, + supports_autograd=False, + skips=( + )), + UnaryUfuncInfo('imag', + ref=np.imag, + dtypes=complex_types_and(torch.chalf), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/issues/66357 + # RuntimeError: view_as_real doesn't work on unresolved conjugated tensors. + check_batched_forward_grad=False, + skips=( + # Skip since real and imag don't have out variants. + DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', 'test_out_arg_all_dtypes'), + )), + OpInfo('gradient', + dtypes=floating_and_complex_types_and(torch.int8, torch.int16, + torch.int32, torch.int64, + torch.bfloat16, torch.half), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # following tests give a runtime error with undefined value tensor + # see discussion : https://github.com/pytorch/pytorch/issues/56660 + # RuntimeError: + # Arguments for call are not valid. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32, torch.complex64)), # noqa: B950 + DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'), + ), + supports_inplace_autograd=False, + sample_inputs_func=sample_inputs_gradient, + error_inputs_func=error_inputs_gradient), + OpInfo('isin', + dtypes=all_types(), + dtypesIfCUDA=all_types_and(torch.half), + supports_autograd=False, + sample_inputs_func=sample_inputs_isin), + OpInfo('kthvalue', + dtypes=all_types_and(torch.bfloat16, torch.float16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_kthvalue, + error_inputs_func=error_inputs_kthvalue), + BinaryUfuncInfo('le', + ref=np.less_equal, + aliases=('less_equal',), + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16), + always_returns_bool=True, + supports_autograd=False, + skips=( + )), + OpInfo('linspace', + dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16), + is_factory_function=True, + supports_out=True, + supports_autograd=False, + error_inputs_func=error_inputs_linspace, + sample_inputs_func=sample_inputs_linspace, + skips=( + # FX failed to normalize op - add the op to the op_skip list. 
+ DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # Tests that assume input is a tensor or sequence of tensors + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + + # Same failure as arange: cannot find linspace in captured graph + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), + + # UserWarning not triggered : Resized a non-empty tensor but did not warn about it. + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + # UserWarning: CUDA caching allocator reports a memory leak not verified by the driver API + # in __main__.TestJitCUDA.test_variant_consistency_jit_logspace_cuda_complex64! + # Caching allocator allocated memory was 0 and is now reported as 307200 on device 0. + # CUDA driver allocated memory was 1254555648 and is now 1242955776. + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', + dtypes=(torch.cfloat,), device_type="cuda"), + )), + OpInfo('linspace', + dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16), + is_factory_function=True, + supports_out=True, + supports_autograd=False, + error_inputs_func=error_inputs_linspace, + sample_inputs_func=sample_inputs_linspace_tensor_overload, + variant_test_name="tensor_overload", + skips=( + # FX failed to normalize op - add the op to the op_skip list. 
+ DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # TypeError: 'int' object is not subscriptable + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + + # Same failure as arange: cannot find linspace in captured graph + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), + + # UserWarning not triggered : Resized a non-empty tensor but did not warn about it. + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + # UserWarning: CUDA caching allocator reports a memory leak not verified by the driver API + # in __main__.TestJitCUDA.test_variant_consistency_jit_logspace_cuda_complex64! + # Caching allocator allocated memory was 0 and is now reported as 307200 on device 0. + # CUDA driver allocated memory was 1254555648 and is now 1242955776. + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', + dtypes=(torch.cfloat,), device_type="cuda"), + )), + OpInfo('logspace', + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), + is_factory_function=True, + supports_out=True, + supports_autograd=False, + error_inputs_func=error_inputs_linspace, + sample_inputs_func=sample_inputs_logspace, + skips=( + # FX failed to normalize op - add the op to the op_skip list. 
+ DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # Tests that assume input is a tensor or sequence of tensors + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + # Same failure as arange: cannot find linspace in captured graph + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), + + # UserWarning not triggered : Resized a non-empty tensor but did not warn about it. + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + + # Off-by-one issue when casting floats to ints + DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_quick', + dtypes=(torch.int16, torch.int32, torch.int64), device_type="cuda"), + DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_comprehensive', + dtypes=(torch.int16, torch.int32, torch.int64), device_type="cuda"), + # UserWarning: CUDA caching allocator reports a memory leak not verified by the driver API + # in __main__.TestJitCUDA.test_variant_consistency_jit_logspace_cuda_complex64! + # Caching allocator allocated memory was 0 and is now reported as 307200 on device 0. + # CUDA driver allocated memory was 1254555648 and is now 1242955776. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', + dtypes=(torch.cfloat,), device_type="cuda"), + )), + OpInfo('logspace', + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), + is_factory_function=True, + supports_out=True, + supports_autograd=False, + error_inputs_func=error_inputs_linspace, + sample_inputs_func=sample_inputs_logspace_tensor_overload, + variant_test_name="tensor_overload", + skips=( + # FX failed to normalize op - add the op to the op_skip list. + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # TypeError: 'int' object is not subscriptable + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + # Same failure as arange: cannot find linspace in captured graph + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), + + # UserWarning not triggered : Resized a non-empty tensor but did not warn about it. + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + + # Off-by-one issue when casting floats to ints + DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_quick', + dtypes=(torch.int16, torch.int32, torch.int64), device_type="cuda"), + DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_comprehensive', + dtypes=(torch.int16, torch.int32, torch.int64), device_type="cuda"), + # UserWarning: CUDA caching allocator reports a memory leak not verified by the driver API + # in __main__.TestJitCUDA.test_variant_consistency_jit_logspace_cuda_complex64! + # Caching allocator allocated memory was 0 and is now reported as 307200 on device 0. + # CUDA driver allocated memory was 1254555648 and is now 1242955776. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', + dtypes=(torch.cfloat,), device_type="cuda"), + )), + UnaryUfuncInfo('log', + ref=np.log, + domain=(0, None), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + backward_dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16, torch.chalf), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + decorators=(precisionOverride({torch.bfloat16: 5e-2}),), + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], + active_if=IS_WINDOWS), + ), + # log(z)->-inf for |z|->0 + reference_numerics_filter=NumericsFilter(condition=lambda x: torch.abs(x) < 0.1, safe_val=1)), + UnaryUfuncInfo('log10', + ref=np.log10, + domain=(0, None), + decorators=(precisionOverride({torch.bfloat16: 5e-2}),), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], + active_if=IS_WINDOWS), + ), + # log10(z)->-inf for |z|->0 + reference_numerics_filter=NumericsFilter(condition=lambda x: torch.abs(x) < 0.1, safe_val=1)), + UnaryUfuncInfo('log2', + ref=np.log2, + domain=(0, None), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + decorators=(precisionOverride({torch.bfloat16: 1e-1}),), + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 
'test_reference_numerics_extremal', + dtypes=[torch.cfloat, torch.cdouble]), + ), + # log2(z)->-inf for |z|->0 + reference_numerics_filter=NumericsFilter(condition=lambda x: torch.abs(x) < 0.1, safe_val=1)), + BinaryUfuncInfo('ldexp', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_inplace_autograd=False, + promotes_int_to_float=True, + supports_out=True, + supports_rhs_python_scalar=False, + skips=( + # RuntimeError: mul(): functions with out=... arguments don't support + # automatic differentiation, but one of the arguments requires grad + # https://github.com/pytorch/pytorch/issues/68966 + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + ), + decorators=[ + DecorateInfo( + toleranceOverride({ + torch.complex64: tol(atol=1e-05, rtol=1e-05) + }), + 'TestCommon', device_type='cpu', + ), + ], ), + BinaryUfuncInfo('logaddexp', + dtypes=floating_and_complex_types_and(torch.bfloat16, torch.float16), + dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_rhs_python_scalar=False, + skips=( + # TODO: FIXME: RuntimeError: not implemented for 'ComplexFloat' + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion', device_type='cuda'), + )), + OpInfo('logaddexp2', + dtypes=floating_types_and(torch.bfloat16, torch.half), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_logaddexp), + UnaryUfuncInfo('logical_not', + ref=np.logical_not, + 
decorators=(precisionOverride({torch.bfloat16: 7e-1, + torch.float16: 5e-1}),), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + supports_autograd=False, + skips=( + # The function variant always returns BoolTensor + # while the inplace variant preserves the input dtype. + # >>> t = torch.randn(3) + # >>> torch.logical_not(t) + # tensor([False, False, False]) + # >>> torch.logical_not(t).dtype + # torch.bool + # >>> t.logical_not_().dtype + # torch.float32 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_variant_consistency', + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16)), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager', + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16)), + )), + BinaryUfuncInfo('lt', + ref=np.less, + aliases=('less',), + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16), + always_returns_bool=True, + supports_autograd=False, + skips=( + )), + OpInfo('lu_unpack', + op=torch.lu_unpack, + dtypes=floating_and_complex_types(), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=(skipCPUIfNoLapack,), + sample_inputs_func=sample_inputs_lu_unpack), + OpInfo('lu', + op=torch.lu, + dtypes=floating_and_complex_types(), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_lu, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], + skips=( + # we skip jit tests because `lu` is a torch function + # RuntimeError: + # 'Tensor (inferred)' object has no attribute or method 'lu'.: + # File "", line 3 + # def the_method(i0): + # return i0.lu(True, True) + # ~~~~~ <--- HERE + 
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # RuntimeError not raised: Expected RuntimeError when calling with input.device=cpu and out.device=cuda + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + # UserWarning not triggered : Resized a non-empty tensor but did not warn about it. + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + )), + OpInfo('lu_solve', + op=torch.lu_solve, + dtypes=floating_and_complex_types(), + supports_forward_ad=True, + # See https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_lu_solve, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out', + device_type='mps', dtypes=[torch.float32]), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager', + device_type='mps', dtypes=[torch.float32]), + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', + device_type='mps', dtypes=[torch.float32]), + DecorateInfo(unittest.skip("Tests different backward paths"), + "TestCommon", "test_floating_inputs_are_differentiable"),), + decorators=[skipCPUIfNoLapack, skipCUDAIfNoMagmaAndNoCusolver]), + OpInfo('masked_fill', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + sample_inputs_func=sample_inputs_masked_fill, + error_inputs_func=error_inputs_masked_fill, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_forward_grad=False, + supports_out=False), + OpInfo('masked_scatter', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_masked_scatter, + error_inputs_func=error_inputs_masked_scatter, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + supports_out=False, + skips=( + )), 
+ OpInfo('masked_select', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_masked_select, + error_inputs_func=error_inputs_masked_select, + skips=( + # Compiler issue on ROCm. Might need to skip until ROCm5.5 + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_non_standard_bool_values', + dtypes=[torch.bool], active_if=TEST_WITH_ROCM), + )), + OpInfo('matrix_exp', + dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), + aliases=('linalg.matrix_exp',), + sample_inputs_func=sample_inputs_matrix_exp, + # Needs to construct a 2nx2n matrix by copy_ ing into it + check_batched_grad=False, + check_batched_gradgrad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + skips=( + # mexp does not support bf16 and fp16 + DecorateInfo(unittest.skip('Skipped!'), 'TestInductorOpInfo', 'test_comprehensive', + dtypes=[torch.half], device_type="cpu"), + ), + supports_out=False, + ), + OpInfo('matmul', + aliases=('linalg.matmul',), + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, + *[torch.bfloat16] + if SM53OrLater or TEST_WITH_ROCM else []), + assert_autodiffed=True, + assert_jit_shape_analysis=True, + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_forward_grad=False, + sample_inputs_func=partial(sample_inputs_matmul, is_rmatmul=False), + decorators=[ + # NVIDIA only assures that bfloat16 is supported by bmm if SM >= 5.3 + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes', device_type='cuda', active_if=not SM53OrLater), + # ROCm intermittently fails the test with standard atol/rtol + 
DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-4, rtol=0)}), + 'TestCommon', 'test_noncontiguous_samples', device_type='cuda', + active_if=TEST_WITH_ROCM), + DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-4, rtol=0)}), + 'TestCommon', 'test_out', device_type='cuda', + active_if=TEST_WITH_ROCM), + # mv for the sample with shapes (S, S, M, M), (M,) has some variance in the + # backward on CPU + DecorateInfo(toleranceOverride({torch.float32: tol(atol=0, rtol=1e-5)}), + 'TestCommon', 'test_noncontiguous_samples', + device_type='cpu'), + DecorateInfo( + toleranceOverride({ + torch.float32: tol(atol=1e-5, rtol=1e-5), + torch.complex64: tol(atol=1e-5, rtol=1e-5), + }), + "TestDecomp", "test_comprehensive", device_type="cuda", + ), + ], + skips=( + # Strides are not the same! + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + # https://github.com/pytorch/pytorch/issues/67470 + DecorateInfo(unittest.skip("67470!"), + 'TestCommon', 'test_noncontiguous_samples', + device_type='cpu', dtypes=(torch.long,)), + # AssertionError: False is not true : Tensors failed to compare as equal! 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestOpInfo', + device_type='xla', dtypes=(torch.long,)), + # https://github.com/pytorch/pytorch/issues/71774 + DecorateInfo(unittest.skip('Skipped!'), 'TestNNCOpInfo', 'test_nnc_correctness', + device_type='cpu', dtypes=(torch.long,)), + )), + OpInfo('max', + variant_test_name='reduction_with_dim', + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + sample_inputs_func=sample_inputs_max_min_reduction_with_dim, + supports_fwgrad_bwgrad=True, + skips=( + ), + supports_forward_ad=True), + OpInfo('max', + variant_test_name='reduction_no_dim', + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + supports_out=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_max_min_reduction_no_dim, + skips=( + )), + OpInfo('median', + dtypes=all_types_and(torch.bfloat16, torch.float16), + # TODO: some signatures of median do support out + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + error_inputs_func=error_inputs_median, + sample_inputs_func=partial(sample_inputs_reduction, supports_multiple_dims=False)), + OpInfo('nanmedian', + dtypes=all_types_and(torch.bfloat16, torch.float16), + # TODO: some signatures of nanmedian do support out + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=partial(sample_inputs_reduction, supports_multiple_dims=False)), + OpInfo('var_mean', + dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_std_var, + # TODO: some signatures of var_mean do support out + supports_out=False, + supports_forward_ad=True, + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + decorators=( + DecorateInfo(toleranceOverride({torch.float64: tol(atol=2e-7, rtol=2e-7)}), + "TestDecomp", "test_comprehensive", device_type="cuda"), + DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-3, rtol=2e-3)}), + 
"TestInductorOpInfo", "test_comprehensive", device_type="cuda"), + )), + OpInfo('var_mean', + variant_test_name='unbiased', + dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_std_var_unbiased, + # TODO: some signatures of var_mean do support out + supports_out=False, + supports_forward_ad=True, + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + decorators=( + DecorateInfo(toleranceOverride({torch.float64: tol(atol=2e-7, rtol=2e-7)}), + "TestDecomp", "test_comprehensive", device_type="cuda"), + DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-3, rtol=2e-3)}), + "TestInductorOpInfo", "test_comprehensive", device_type="cuda"), + )), + OpInfo('std_mean', + dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_std_var, + # TODO: some signatures of std_mean do support out + supports_out=False, + supports_forward_ad=True, + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + decorators=( + DecorateInfo(toleranceOverride({torch.float64: tol(atol=2e-7, rtol=2e-7)}), + "TestDecomp", "test_comprehensive", device_type="cuda"), + )), + OpInfo('std_mean', + variant_test_name='unbiased', + dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_std_var_unbiased, + # TODO: some signatures of var_mean do support out + supports_out=False, + supports_forward_ad=True, + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + decorators=( + DecorateInfo( + toleranceOverride({ + torch.float16: tol(atol=4e-5, rtol=9e-3), + torch.float64: tol(atol=2e-7, rtol=2e-7), + }), + "TestDecomp", + "test_comprehensive", + device_type="cuda" + ), + DecorateInfo( + toleranceOverride({ + torch.float16: tol(atol=4e-5, rtol=9e-3), + torch.float64: tol(atol=2e-7, rtol=2e-7), + }), + "TestInductorOpInfo", + "test_comprehensive", + device_type="cuda" + ), + )), + OpInfo('meshgrid', + 
variant_test_name='variadic_tensors', + ref=np.meshgrid, + dtypes=all_types_and_complex_and(torch.bfloat16, torch.bool, torch.float16), + sample_inputs_func=partial(sample_inputs_meshgrid, variant='variadic'), + skips=[ + # JIT does not support variadic tensors. + # RuntimeError: input->type()->kind() == TypeKind::OptionalType + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":252, + # please report a bug to PyTorch. + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # meshgrid is defined in torch.functional to take a + # variadic list of tensors. Variadic parameters are not + # compatible with the normalize operator tests. + DecorateInfo(unittest.skip("Skipped!"), 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # Skip operator schema test because this is a functional and not an operator + DecorateInfo(unittest.skip("Skipped!"), 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), + ], + supports_out=False, + supports_fwgrad_bwgrad=True, + supports_forward_ad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False,), + OpInfo('meshgrid', + variant_test_name='list_of_tensors', + # Unlike the variant above, we do not use np.meshgrid as a + # ref since it does not officially support list of numpy + # arrays. + dtypes=all_types_and_complex_and(torch.bfloat16, torch.bool, torch.float16), + sample_inputs_func=partial(sample_inputs_meshgrid, variant='list'), + skips=[ + # meshgrid is defined in torch.functional to take a + # variadic list of tensors. Variadic parameters are not + # compatible with the normalize operator tests. 
+ DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + ], + assert_autodiffed=True, + supports_out=False, + autodiff_nonfusible_nodes=[], + supports_fwgrad_bwgrad=True, + supports_forward_ad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False,), + OpInfo('min', + variant_test_name='reduction_with_dim', + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + sample_inputs_func=sample_inputs_max_min_reduction_with_dim, + supports_fwgrad_bwgrad=True, + supports_forward_ad=True, + skips=( + )), + OpInfo('min', + variant_test_name='reduction_no_dim', + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + supports_out=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_max_min_reduction_no_dim, + skips=( + )), + OpInfo('quantile', + dtypes=floating_types(), + sample_inputs_func=sample_inputs_reduction_quantile, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/issues/66357 + # Relies on copy_ to broadcast, but the forward AD path calls broadcast_to which + # does not have a batching rule in core + check_batched_forward_grad=False), + OpInfo('nanquantile', + dtypes=floating_types(), + sample_inputs_func=sample_inputs_reduction_quantile, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/issues/66357 + # Relies on copy_ to broadcast, but the forward AD path calls broadcast_to which + # does not have a batching rule in core + check_batched_forward_grad=False), + BinaryUfuncInfo( + 'max', + aliases=('maximum',), + variant_test_name='binary', + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=True, + ref=np.maximum, + supports_rhs_python_scalar=False, + skips=( + # Incorrectly attempts to use a scalar for the 
second argument + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping'), + # TODO: FIXME: RuntimeError: "max_elementwise_cuda" not implemented for 'ComplexFloat' + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion', device_type='cuda'), + )), + BinaryUfuncInfo( + 'maximum', + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + ref=np.maximum, + supports_rhs_python_scalar=False, + skips=( + # TODO: FIXME: RuntimeError: "max_elementwise_cuda" not implemented for 'ComplexFloat' + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion', device_type='cuda'), + )), + BinaryUfuncInfo( + 'min', + aliases=('minimum',), + variant_test_name='binary', + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=True, + ref=np.minimum, + supports_rhs_python_scalar=False, + skips=( + # Incorrectly attempts to use a scalar for the second argument + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping'), + # TODO: FIXME: RuntimeError: "min_elementwise_cuda" not implemented for 'ComplexFloat' + DecorateInfo(unittest.expectedFailure, + 'TestBinaryUfuncs', + 'test_type_promotion', + device_type='cuda'), + )), + BinaryUfuncInfo( + 'minimum', + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + ref=np.minimum, + supports_rhs_python_scalar=False, + skips=( + # TODO: FIXME: RuntimeError: "min_elementwise_cuda" not implemented for 'ComplexFloat' + DecorateInfo(unittest.expectedFailure, + 'TestBinaryUfuncs', + 'test_type_promotion', + device_type='cuda'), + ), + ), + BinaryUfuncInfo('logical_and', + ref=np.logical_and, + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + supports_autograd=False, + always_returns_bool=True, + 
supports_rhs_python_scalar=False), + BinaryUfuncInfo('logical_or', + ref=np.logical_or, + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + supports_autograd=False, + always_returns_bool=True, + supports_rhs_python_scalar=False), + BinaryUfuncInfo('logical_xor', + ref=np.logical_xor, + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + supports_autograd=False, + always_returns_bool=True, + supports_rhs_python_scalar=False, + skips=( + )), + BinaryUfuncInfo('bitwise_and', + ref=np.bitwise_and, + dtypes=integral_types_and(torch.bool), + operator_variant=operator.and_, + inplace_operator_variant=operator.iand, + supports_autograd=False, + supports_one_python_scalar=True, + skips=( + # RuntimeError: "bitwise_and_cuda" not implemented for 'Half' + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', + 'test_type_promotion', device_type='cuda'), + )), + BinaryUfuncInfo('bitwise_or', + ref=np.bitwise_or, + dtypes=integral_types_and(torch.bool), + operator_variant=operator.or_, + inplace_operator_variant=operator.ior, + supports_autograd=False, + supports_one_python_scalar=True, + skips=( + # TODO: FIXME: RuntimeError: "bitwise_or_cuda" not implemented for 'Half' + DecorateInfo(unittest.expectedFailure, + 'TestBinaryUfuncs', + 'test_type_promotion', + device_type='cuda'), + )), + BinaryUfuncInfo('bitwise_xor', + ref=np.bitwise_xor, + dtypes=integral_types_and(torch.bool), + operator_variant=operator.xor, + inplace_operator_variant=operator.ixor, + supports_autograd=False, + supports_one_python_scalar=True, + skips=( + # TODO: FIXME: RuntimeError: "bitwise_xor_cuda" not implemented for 'Half' + DecorateInfo(unittest.expectedFailure, + 'TestBinaryUfuncs', + 'test_type_promotion', + device_type='cuda'), + )), + BinaryUfuncInfo('heaviside', + ref=lambda a, b: ( + # necessary because np.heaviside incorrectly returns float64 when passed args of dtype int64 + np.int64(np.heaviside(a, b)) if a.dtype == np.int64 and b.dtype 
== np.int64 else np.heaviside(a, b) + ), + dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16), + supports_autograd=False, + supports_rhs_python_scalar=False, + skips=( + # RuntimeError: heaviside is not yet implemented for tensors with different dtypes. + DecorateInfo(unittest.expectedFailure, + 'TestBinaryUfuncs', + 'test_type_promotion'), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_binary_ufuncs_mixed_dtype'), + # PyTorch's heaviside does not appear to propagate NaNs + DecorateInfo(unittest.skip("Skipped!"), + 'TestBinaryUfuncs', + 'test_reference_numerics_extremal_values'), + )), + BinaryUfuncInfo('lcm', + ref=np.lcm, + dtypes=integral_types_and(), + supports_autograd=False, + supports_rhs_python_scalar=False), + BinaryUfuncInfo('gcd', + ref=np.gcd, + dtypes=integral_types_and(), + supports_autograd=False, + supports_rhs_python_scalar=False, + skips=( + DecorateInfo(unittest.expectedFailure, + 'TestBinaryUfuncs', + 'test_reference_numerics_small_values', + dtypes=(torch.int8,)),)), + BinaryUfuncInfo('isclose', + ref=np.isclose, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_isclose, + error_inputs_func=error_inputs_isclose, + supports_autograd=False, + supports_out=False, + supports_rhs_python_scalar=False, + skips=( + DecorateInfo(unittest.expectedFailure, + 'TestCommon', + 'test_numpy_refs', dtypes=(torch.complex128,)), + # RuntimeError: Short did not match Int + DecorateInfo(unittest.expectedFailure, + 'TestBinaryUfuncs', + 'test_type_promotion'), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_binary_ufuncs_mixed_dtype'), + DecorateInfo(unittest.skip("Skipped!"), + 'TestBinaryUfuncs', + 'test_reference_numerics_extremal_values'), + )), + # `softmax` supports different dtypes based on whether `dtype` argument, + # is passed or not. Hence two OpInfo entries, one with dtype and other without. 
+ # https://github.com/pytorch/pytorch/issues/68752 + OpInfo('softmax', + aliases=('special.softmax', 'nn.functional.softmax',), + aten_name='softmax', + aten_backward_name='_softmax_backward_data', + dtypes=floating_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_softmax_variant, + assert_jit_shape_analysis=True, + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=True), + OpInfo('softmax', + aliases=('special.softmax', 'nn.functional.softmax',), + variant_test_name="with_dtype", + aten_name='softmax', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + sample_inputs_func=partial(sample_inputs_softmax_variant, with_dtype=True), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=True), + OpInfo( + '_softmax_backward_data', + op=torch.ops.aten._softmax_backward_data, + aten_name='_softmax_backward_data', + dtypes=floating_types_and(torch.bfloat16, torch.float16), + sample_inputs_func=sample_inputs_softmax_backward_data, + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_noncontiguous_samples', device_type='cpu'), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), + ), + ), + # `softmin` supports different dtypes based on whether `dtype` argument, + # is passed or not. Hence two OpInfo entries, one with dtype and other without. 
+ # https://github.com/pytorch/pytorch/issues/68752 + OpInfo('nn.functional.softmin', + aten_name='softmin', + dtypes=floating_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_softmax_variant, + assert_jit_shape_analysis=False, + assert_autodiffed=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False), + OpInfo('nn.functional.softmin', + variant_test_name="with_dtype", + aten_name='softmin', + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + sample_inputs_func=partial(sample_inputs_softmax_variant, with_dtype=True), + assert_autodiffed=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False), + OpInfo( + "nn.functional.cross_entropy", + dtypes=floating_types_and(torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_cross_entropy, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=( + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=3e-3, rtol=1e-3)}), + "TestJit", + "test_variant_consistency_jit", + device_type="cpu", + ), + ), + skips=( + # AssertionError: False is not true : Scalars failed to compare as equal! 
0 != 1536 + # test_ops.TestJitCUDA.test_variant_consistency_jit_nn_functional_cross_entropy_cuda_float32 leaked + # 1536 bytes CUDA memory on device 0 + DecorateInfo( + unittest.expectedFailure, + "TestJit", + "test_variant_consistency_jit", + device_type="cuda", + ), + DecorateInfo(unittest.skip("FP16 corss_entropy cases have not been enabled on MPS yet"), + dtypes=(torch.half,), device_type="mps"), + + ) + ), + OpInfo('nn.functional.normalize', + dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_normalize, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True), + OpInfo('aminmax', + ref=lambda x, dim=None, keepdim=False: (np.amin(x, axis=dim, keepdims=keepdim), np.amax(x, axis=dim, keepdims=keepdim)), + dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16), + decorators=(onlyNativeDeviceTypes,), + supports_autograd=False, + sample_inputs_func=sample_inputs_aminmax, + error_inputs_func=error_inputs_aminmax_amax_amin), + OpInfo('as_strided', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # vmap does not support inplace views + check_inplace_batched_forward_grad=False, + sample_inputs_func=sample_inputs_as_strided, + skips=( + # Note: This xfail is fine -- it's inherent to how as_strided works + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_noncontiguous_samples'), + # AssertionError: False is not true : Scalars failed to compare as equal! 
+ DecorateInfo(unittest.skip("Errors when storage_offset is included"), + 'TestCommon', 'test_variant_consistency_eager'), + # Not close + DecorateInfo(unittest.skip("Errors when storage_offset is included"), + 'TestCommon', 'test_complex_half_reference_testing'), + # Not close + DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.skip("Numerous errors"), 'TestFwdGradients'), + DecorateInfo(unittest.skip("Numerous errors"), 'TestBwdGradients'), + )), + OpInfo('as_strided', + variant_test_name='partial_views', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # vmap does not support inplace views + check_inplace_batched_forward_grad=False, + sample_inputs_func=sample_inputs_as_strided_partial_views, + skips=( + # Note: This xfail is fine -- it's inherent to how as_strided works + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_noncontiguous_samples'), + # RuntimeError: This operator is not Composite Compliant: the + # storage_offset of the tensor was modified directly without + # going through the PyTorch dispatcher. 
+ DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad'), + DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward'), + + # These fail because the test changes the input's in-memory layout + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_complex_half_reference_testing'), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_compare_cpu'), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_fn_fwgrad_bwgrad', + dtypes=(torch.complex64, torch.complex128)), + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_forward_mode_AD'), + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_inplace_forward_mode_AD'), + DecorateInfo(unittest.expectedFailure, 'TestBwdGradients', 'test_inplace_grad'), + DecorateInfo(unittest.expectedFailure, 'TestBwdGradients', 'test_inplace_gradgrad'), + DecorateInfo(unittest.expectedFailure, 'TestProxyTensorOpInfo', + 'test_make_fx_symbolic_exhaustive_inplace'), + DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness'), + # Fail but are also flaky + DecorateInfo(unittest.skip("Test changes in memory layout"), 'TestMathBits'), + DecorateInfo(unittest.skip("Modifies input strides and storage_offset"), 'TestCommon', + 'test_non_standard_bool_values'), + # RuntimeError: setStorage: sizes [2, 2], strides [1, 2], storage offset 10, and itemsize 2 requiring a + # storage size of 28 are out of bounds for storage of size 20 + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_meta_inplace'), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_meta_inplace'), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_symbolic_meta_inplace'), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 
'test_dispatch_symbolic_meta_inplace_all_strides'), + )), + OpInfo('as_strided_copy', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + supports_out=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # vmap does not support inplace views + check_inplace_batched_forward_grad=False, + sample_inputs_func=sample_inputs_as_strided, + skips=( + # Note: This xfail is fine -- it's inherent to how as_strided works + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_noncontiguous_samples'), + # AssertionError: False is not true : Scalars failed to compare as equal! + DecorateInfo(unittest.skip("Errors when storage_offset is included"), + 'TestCommon', 'test_variant_consistency_eager'), + # Not close + DecorateInfo(unittest.skip("Errors when storage_offset is included"), + 'TestCommon', 'test_complex_half_reference_testing'), + # Not close + DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.skip("Numerous errors"), 'TestFwdGradients'), + DecorateInfo(unittest.skip("Numerous errors"), 'TestBwdGradients'), + DecorateInfo(unittest.expectedFailure, 'TestDTensorOps', 'test_dtensor_op_db'), + )), + OpInfo('as_strided_scatter', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # vmap does not support inplace views + check_inplace_batched_forward_grad=False, + sample_inputs_func=sample_inputs_as_strided_scatter, + error_inputs_func=error_inputs_as_strided_scatter, + skips=( + DecorateInfo(unittest.skip('Works for int64, fails for everything else'), 'TestCommon', 'test_noncontiguous_samples'), # noqa: B950 + DecorateInfo(unittest.skip('Fails in most cases, passes on LAZY for some reason'), 'TestCommon', 
'test_variant_consistency_eager'), # noqa: B950 + DecorateInfo(unittest.skip('Fails on cuda + rocm'), 'TestCommon', 'test_complex_half_reference_testing'), + DecorateInfo(unittest.expectedFailure, 'TestBwdGradients', 'test_fn_grad'), + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_forward_mode_AD'), + DecorateInfo(unittest.skip('Passes on complex128 and float64 only'), 'TestFwdGradients', 'test_fn_fwgrad_bwgrad'), + # AssertionError: Tensor-likes are not close! (new_empty_strided.default) + DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), 'TestDecomp', 'test_comprehensive'),)), + OpInfo('native_layer_norm', + aten_name='native_layer_norm', + ref=reference_native_layer_norm, + dtypes=floating_types_and(torch.half, torch.bfloat16), + supports_out=False, + assert_jit_shape_analysis=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_native_layer_norm, + error_inputs_func=error_inputs_native_layer_norm, + skips=( + # IndexError: tuple index out of range + DecorateInfo(unittest.skip('Skipped!'), 'TestFwdGradients', 'test_forward_mode_AD'), + # Tests fail when weight=None and bias is defined + # https://github.com/pytorch/pytorch/issues/79705 + DecorateInfo(unittest.expectedFailure, 'TestBwdGradients', 'test_fn_gradgrad'), + # JIT test also tries to compute double backward, which fails + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip("Unsupported on MPS for now"), 'TestCommon', 'test_numpy_ref_mps'), + DecorateInfo(toleranceOverride({torch.float32: tol(atol=2e-03, rtol=5e-03)}), + "TestDecomp", "test_comprehensive", device_type="cpu"), + )), + OpInfo('native_batch_norm', + aten_name='native_batch_norm', + dtypes=floating_types_and(torch.float16, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_jit_shape_analysis=True, + allow_cow_input_materialize_forward=[3, 4], + allow_cow_input_materialize_backward=[3, 
4], + sample_inputs_func=sample_inputs_native_batch_norm, + skips=( + # NotImplementedError: Could not run + # 'aten::native_batch_norm.out' with arguments from the 'CPU' backend. + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning', device_type="cpu"), + # RuntimeError: out_invstd.dim() == 1 && out_invstd.is_contiguous() && out_invstd.sizes()[0] + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out', device_type="cuda"), + # Problem with _get_numerical_jacobian + # IndexError: tuple index out of range + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_forward_mode_AD'), + # RuntimeError: deepEquals(input.iValue, deepCopiedInput) INTERNAL ASSERT FAILED + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # https://github.com/pytorch/pytorch/issues/85960 + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_compare_cpu'), + # AssertionError: Booleans mismatch: True is not False + DecorateInfo(unittest.skip("Skipped!"), 'TestFakeTensor', 'test_fake_autocast'), + DecorateInfo(unittest.skip("Skipped!"), 'TestFakeTensor', 'test_fake'), + DecorateInfo(toleranceOverride({torch.float32: tol(atol=5e-5, rtol=5e-5)}), + "TestCompositeCompliance", "test_forward_ad"), + ) + ), + OpInfo('_native_batch_norm_legit', + aten_name='_native_batch_norm_legit', + dtypes=floating_types_and(torch.float16, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_jit_shape_analysis=True, + allow_cow_input_materialize_forward=[3, 4], + allow_cow_input_materialize_backward=[3, 4], + sample_inputs_func=sample_inputs__native_batch_norm_legit, + skips=( + # NotImplementedError: Could not run + # 'aten::native_batch_norm.out' with arguments from the 'CPU' backend. 
+ DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning', device_type="cpu"), + # RuntimeError: out_invstd.dim() == 1 && out_invstd.is_contiguous() && out_invstd.sizes()[0] + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out', device_type="cuda"), + # Problem with _get_numerical_jacobian + # IndexError: tuple index out of range + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_forward_mode_AD'), + # RuntimeError: deepEquals(input.iValue, deepCopiedInput) INTERNAL ASSERT FAILED + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # https://github.com/pytorch/pytorch/issues/85960 + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_compare_cpu'), + DecorateInfo(toleranceOverride({torch.float32: tol(atol=5e-5, rtol=5e-5)}), + "TestCompositeCompliance", "test_forward_ad"), + ) + ), + OpInfo('_batch_norm_with_update', + op=torch.ops.aten._batch_norm_with_update, + aten_name='_batch_norm_with_update', + dtypes=floating_types_and(torch.float16, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_jit_shape_analysis=True, + allow_cow_input_materialize_forward=[3, 4], + allow_cow_input_materialize_backward=[3, 4], + sample_inputs_func=sample_inputs__batch_norm_with_update, + skips=( + # NotImplementedError: Could not run + # 'aten::native_batch_norm.out' with arguments from the 'CPU' backend. 
+ DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning', device_type="cpu"), + # RuntimeError: out_invstd.dim() == 1 && out_invstd.is_contiguous() && out_invstd.sizes()[0] + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out', device_type="cuda"), + # Problem with _get_numerical_jacobian + # IndexError: tuple index out of range + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_forward_mode_AD'), + # RuntimeError: deepEquals(input.iValue, deepCopiedInput) INTERNAL ASSERT FAILED + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(toleranceOverride({torch.float32: tol(atol=5e-5, rtol=5e-5)}), + "TestCompositeCompliance", "test_forward_ad"), + # _batch_norm_with_update expects contiguous inputs for cudnn and miopen + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_noncontiguous_samples', device_type="cuda"), + DecorateInfo(unittest.expectedFailure, + 'TestMeta', 'test_dispatch_symbolic_meta_outplace_all_strides', device_type="cuda"), + # _batch_norm_with_update does not have python bindings + DecorateInfo(unittest.skip("Skipped!"), 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # aten out variants do not accept out= kwarg, only python out variants + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + ) + ), + OpInfo('nn.functional.cosine_similarity', + aten_name="cosine_similarity", + dtypes=floating_types_and(torch.half, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=[ + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1.3e-5, rtol=2e-2)}), + "TestInductorOpInfo", + "test_comprehensive", + device_type="cuda" + ), + ], + sample_inputs_func=sample_inputs_cosine_similarity), + OpInfo('nn.functional.adaptive_avg_pool1d', + dtypes=floating_types_and(torch.half, torch.bfloat16), + 
supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + error_inputs_func=error_inputs_adaptive_avg_pool1d, + sample_inputs_func=sample_inputs_adaptive_avg_pool1d), + OpInfo('nn.functional.adaptive_avg_pool2d', + dtypes=floating_types_and(torch.half, torch.bfloat16), + decorators=( + # RuntimeError: + # adaptive_avg_pool2d(Tensor input, int[2] output_size) -> (Tensor): + # Expected a value of type 'List[int]' for argument 'output_size' but + # instead found type 'Tuple[NoneType, int]'. : + # File "", line 3 + # def the_method(i0): + # return torch.nn.functional.adaptive_avg_pool2d(i0, (None, 7)) + # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + ), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + error_inputs_func=error_inputs_adaptive_avg_pool2d, + sample_inputs_func=sample_inputs_adaptive_avg_pool2d), + OpInfo('nn.functional.adaptive_avg_pool3d', + dtypes=floating_types_and(torch.half, torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), + decorators=( + # RuntimeError: + # adaptive_avg_pool3d(Tensor input, int[3] output_size) -> (Tensor): + # Expected a value of type 'List[int]' for argument 'output_size' but + # instead found type 'Tuple[NoneType, NoneType, NoneType]'. 
: + # File "", line 3 + # + # def the_method(i0): + # return torch.nn.functional.adaptive_avg_pool3d(i0, (None, None, None)) + # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE + # + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + ), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + error_inputs_func=error_inputs_adaptive_avg_pool3d, + sample_inputs_func=sample_inputs_adaptive_avg_pool3d), + OpInfo('nn.functional.adaptive_max_pool1d', + dtypes=floating_types_and(torch.half, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # got: Batching rule not implemented for aten::flatten.using_ints + check_batched_forward_grad=False, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + error_inputs_func=error_inputs_adaptive_max_pool1d, + sample_inputs_func=sample_inputs_adaptive_max_pool1d), + OpInfo('nn.functional.adaptive_max_pool2d', + dtypes=floating_types_and(torch.half, torch.bfloat16), + decorators=( + # RuntimeError: + # adaptive_max_pool2d(Tensor input, int[2] output_size) -> (Tensor): + # Expected a value of type 'List[int]' for argument 'output_size' but + # instead found type 'Tuple[NoneType, int]'. 
: + # File "", line 3 + # def the_method(i0): + # return torch.nn.functional.adaptive_max_pool2d(i0, (None, 7)) + # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + ), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # got: Batching rule not implemented for aten::flatten.using_ints + check_batched_forward_grad=False, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + error_inputs_func=error_inputs_adaptive_max_pool2d, + sample_inputs_func=sample_inputs_adaptive_max_pool2d), + OpInfo('nn.functional.adaptive_max_pool3d', + dtypes=floating_types_and(torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), + decorators=( + # RuntimeError: + # adaptive_max_pool3d(Tensor input, int[3] output_size) -> (Tensor): + # Expected a value of type 'List[int]' for argument 'output_size' but + # instead found type 'Tuple[NoneType, NoneType, NoneType]'. : + # File "", line 3 + # + # def the_method(i0): + # return torch.nn.functional.adaptive_max_pool3d(i0, (None, None, None)) + # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE + # + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + ), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # got: Batching rule not implemented for aten::flatten.using_ints + check_batched_forward_grad=False, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + error_inputs_func=error_inputs_adaptive_max_pool3d, + sample_inputs_func=sample_inputs_adaptive_max_pool3d), + OpInfo('nn.functional.avg_pool1d', + aten_name='avg_pool1d', + supports_autograd=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + dtypes=floating_types_and(torch.int64, torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + 
error_inputs_func=error_inputs_avg_pool1d, + sample_inputs_func=sample_inputs_avgpool1d), + OpInfo('nn.functional.avg_pool3d', + aten_name='avg_pool3d', + supports_autograd=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + dtypes=floating_types_and(torch.int64), + dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + error_inputs_func=error_inputs_avg_pool3d, + sample_inputs_func=sample_inputs_avgpool3d, + skips=( + # AssertionError: Tensor-likes are not close! + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out', device_type='cpu'), + )), + OpInfo( + "nn.functional.binary_cross_entropy_with_logits", + aten_name="binary_cross_entropy_with_logits", + supports_autograd=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + dtypes=floating_types_and(torch.half, torch.bfloat16), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + sample_inputs_func=sample_inputs_binary_cross_entropy_with_logits, + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + 'TestJit', + 'test_variant_consistency_jit', + dtypes=(torch.float32,) + ), + ), + ), + UnaryUfuncInfo( + 'nn.functional.relu', + aten_name="relu", + ref=lambda a: np.where(a <= 0, 0, a), + supports_autograd=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + dtypes=all_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_nn_activation_relu, + supports_out=False, + supports_fwgrad_bwgrad=True, + supports_forward_ad=True), + OpInfo('nn.functional.conv_transpose1d', + # `ref` for this function is backward of + # corresponding `conv*d` + ref=partial(conv_transpose_ref, fn=torch.nn.functional.conv_transpose1d), + aten_name='conv_transpose1d', + aliases=('conv_transpose1d',), + dtypes=floating_and_complex_types_and(torch.int64, torch.float16, torch.bfloat16), + 
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.chalf, + torch.bfloat16), + sample_inputs_func=sample_inputs_conv_transpose1d, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_jit_shape_analysis=True, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + decorators=( + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1.3e-06), }), + 'TestCommon', 'test_variant_consistency_eager', device_type='cuda'), + DecorateInfo( + toleranceOverride({torch.chalf: tol(atol=5e-2, rtol=5e-2), }), + 'TestCommon', 'test_complex_half_reference_testing'), + DecorateInfo( + toleranceOverride({torch.float: tol(atol=1.5e-5, rtol=1.5e-5), }), + 'TestCommon', 'test_numpy_ref_mps'), + DecorateInfo( + toleranceOverride({torch.half: tol(atol=1e-3, rtol=5e-3), }), + 'TestInductorOpInfo', 'test_comprehensive', device_type='cpu'), + ), + skips=( + # Reason for Skip: https://github.com/pytorch/pytorch/pull/79694#issuecomment-1186949486 + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', + dtypes=(torch.complex64,)), + # RuntimeError: UNSUPPORTED DTYPE: complex + DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness', + dtypes=(torch.complex64, torch.complex128)), + # RuntimeError: !lhs.isAliasOf(rhs)INTERNAL ASSERT FAILED at + # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":104, please report a bug to PyTorch. 
+ DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', + dtypes=(torch.float,)), + # RuntimeError: "slow_conv2d_cpu_grad_input" not implemented for 'Long' + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_numpy_ref', + dtypes=(torch.int64,)), + ), + supports_out=False,), + OpInfo('nn.functional.conv_transpose2d', + aten_name='conv_transpose2d', + aliases=('conv_transpose2d',), + # `ref` for this function is backward of + # corresponding `conv*d` + ref=partial(conv_transpose_ref, fn=torch.nn.functional.conv_transpose2d), + dtypes=floating_and_complex_types_and(torch.int64, torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.chalf, + torch.bfloat16), + sample_inputs_func=sample_inputs_conv_transpose2d, + # Runs very slowly on slow-gradcheck for complex. + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_jit_shape_analysis=True, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + decorators=[ + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1.3e-06), }), + 'TestCommon', 'test_variant_consistency_eager', device_type='cuda'), + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=2e-05, rtol=5e-05), }), + 'TestCommon', 'test_noncontiguous_samples', device_type='cuda'), + DecorateInfo( + toleranceOverride({torch.chalf: tol(atol=8e-2, rtol=8e-2), }), + 'TestCommon', 'test_complex_half_reference_testing'), + DecorateInfo( + toleranceOverride({torch.half: tol(atol=1e-3, rtol=4e-3), }), + 'TestInductorOpInfo', 'test_comprehensive', device_type='cpu')], + skips=( + # RuntimeError: !lhs.isAliasOf(rhs)INTERNAL ASSERT FAILED at + # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":104, please report a bug to PyTorch. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # RuntimeError: UNSUPPORTED DTYPE: complex + DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness', + dtypes=(torch.complex64, torch.complex128)), + # RuntimeError: "slow_conv2d_cpu_grad_input" not implemented for 'Long' + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_numpy_ref', + dtypes=(torch.int64,)), + # Reference: https://github.com/pytorch/pytorch/issues/86356 + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_numpy_ref', + dtypes=(torch.double, torch.cdouble)), + DecorateInfo(unittest.skip("Unsupported on MPS for now"), 'TestCommon', 'test_numpy_ref_mps'), + # AssertionError: None mismatch: torch.complex64 is not None + DecorateInfo(unittest.expectedFailure, 'TestDtypeCustomRules', 'test_custom_rules', + dtypes=(torch.complex64, torch.complex128)), + ), + supports_out=False,), + OpInfo('nn.functional.conv_transpose3d', + aten_name='conv_transpose3d', + aliases=('conv_transpose3d',), + # `ref` for this function is backward of + # corresponding `conv*d` + ref=partial(conv_transpose_ref, fn=torch.nn.functional.conv_transpose3d), + dtypes=floating_and_complex_types_and(torch.int64, torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and( + torch.float16, torch.chalf, torch.bfloat16), + sample_inputs_func=sample_inputs_conv_transpose3d, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_jit_shape_analysis=True, + # Runs very slowly on slow-gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + decorators=[ + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=5e-2, rtol=5e-2), }), + 'TestInductorOpInfo', 'test_comprehensive', device_type='cuda'), + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1.3e-06), + torch.complex64: tol(atol=1.3e-04, rtol=1.3e-05)}), + 'TestCommon', 
'test_variant_consistency_eager', device_type='cuda'), + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=2e-04, rtol=2e-04), }), + 'TestCompositeCompliance', 'test_operator', device_type='cuda'), + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1.3e-04, rtol=1.3e-06), + torch.complex64: tol(atol=1.3e-04, rtol=1.3e-05)}), + 'TestCommon', 'test_noncontiguous_samples', device_type='cuda'), + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1e-04, rtol=2e-05), }), + 'TestCompositeCompliance', 'test_forward_ad', device_type='cuda', + active_if=TEST_CUDNN), + DecorateInfo( + toleranceOverride({torch.complex64: tol(atol=1e-4, rtol=1e-4)}), + "TestMathBits", "test_conj_view", device_type='cuda'), + DecorateInfo( + toleranceOverride({torch.chalf: tol(atol=9e-2, rtol=9e-2), }), + 'TestCommon', 'test_complex_half_reference_testing'), + DecorateInfo( + toleranceOverride({torch.half: tol(atol=9e-3, rtol=2e-1), }), + 'TestInductorOpInfo', 'test_comprehensive', device_type='cpu')], + skips=( + # RuntimeError: !lhs.isAliasOf(rhs)INTERNAL ASSERT FAILED at + # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":104, please report a bug to PyTorch. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # RuntimeError: "slow_conv3d_cpu_grad_input" not implemented for 'Long' + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_numpy_ref', + dtypes=(torch.int64,)), + # Reference: https://github.com/pytorch/pytorch/issues/86356 + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_numpy_ref', + dtypes=(torch.double, torch.cdouble)), + DecorateInfo(unittest.skip("Unsupported on MPS for now"), 'TestCommon', 'test_numpy_ref_mps'), + # RuntimeError: UNSUPPORTED DTYPE: complex + DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness', + dtypes=(torch.complex64, torch.complex128)), + DecorateInfo(unittest.skip('Skipped for ROCm!'), 'TestCommon', 'test_complex_half_reference_testing', + dtypes=[torch.complex32], active_if=TEST_WITH_ROCM), + ), + supports_out=False,), + OpInfo('nn.functional.conv1d', + aliases=('conv1d',), + aten_name='conv1d', + dtypes=floating_and_complex_types_and(torch.int64, torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.chalf, + torch.bfloat16), + sample_inputs_func=sample_inputs_conv1d, + error_inputs_func=error_inputs_conv1d, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_jit_shape_analysis=True, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + decorators=( + DecorateInfo( + toleranceOverride({torch.chalf: tol(atol=1e-2, rtol=5e-2)}), + 'TestCommon', 'test_complex_half_reference_testing' + ), + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=2e-3, rtol=1e-3)}), + 'TestInductorOpInfo', 'test_comprehensive', device_type='cuda', + ), + ), + skips=( + # RuntimeError: !lhs.isAliasOf(rhs)INTERNAL ASSERT FAILED at + # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":103, please report a bug to PyTorch. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # Ref: https://github.com/pytorch/pytorch/issues/75309 + # AssertionError: None mismatch: torch.complex128 is not None + DecorateInfo(unittest.expectedFailure, 'TestDtypeCustomRules', + 'test_custom_rules', dtypes=(torch.complex64, torch.complex128)), + # Ref: https://github.com/pytorch/pytorch/issues/75309 + # RuntimeError: UNSUPPORTED DTYPE: complex + DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', + 'test_nnc_correctness', dtypes=(torch.complex64, torch.complex128)), + ), + supports_expanded_weight=True, + supports_out=False,), + OpInfo('nn.functional.conv2d', + aliases=('conv2d',), + aten_name='conv2d', + dtypes=floating_and_complex_types_and(torch.int64, torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.chalf, + torch.bfloat16), + sample_inputs_func=partial(sample_inputs_conv2d), + error_inputs_func=error_inputs_conv2d, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_jit_shape_analysis=True, + decorators=( + DecorateInfo( + toleranceOverride({torch.chalf: tol(atol=6e-2, rtol=5e-2)}), + 'TestCommon', 'test_complex_half_reference_testing', + ), + ), + skips=( + # RuntimeError: !lhs.isAliasOf(rhs)INTERNAL ASSERT FAILED at + # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":103, please report a bug to PyTorch. 
+ DecorateInfo(unittest.skip("Works on some configs!"), 'TestJit', 'test_variant_consistency_jit'), + # Ref: https://github.com/pytorch/pytorch/issues/75309 + # AssertionError: None mismatch: torch.complex128 is not None + DecorateInfo(unittest.expectedFailure, 'TestDtypeCustomRules', + 'test_custom_rules', dtypes=(torch.complex64, torch.complex128)), + # RuntimeError: UNSUPPORTED DTYPE: complex + DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', + 'test_nnc_correctness', dtypes=(torch.complex64, torch.complex128)), + ), + supports_expanded_weight=True, + supports_out=False,), + OpInfo('nn.functional.conv3d', + aliases=('conv3d',), + aten_name='conv3d', + dtypes=floating_and_complex_types_and(torch.int64, torch.bfloat16, torch.float16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.chalf, torch.bfloat16), + sample_inputs_func=sample_inputs_conv3d, + error_inputs_func=error_inputs_conv3d, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=( + DecorateInfo( + toleranceOverride({torch.chalf: tol(atol=6e-2, rtol=5e-2)}), + 'TestCommon', 'test_complex_half_reference_testing', + ), + # TF32 + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=5e-3, rtol=1e-3), + torch.complex64: tol(atol=5e-3, rtol=1e-3)}), + 'TestCommon', 'test_noncontiguous_samples', + ), + DecorateInfo( + toleranceOverride({torch.complex64: tol(atol=5e-5, rtol=5e-6)}), + 'TestMathBits', 'test_conj_view', + ), + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=5e-5, rtol=5e-6)}), + 'TestOperators', 'test_vjpvmap', + ), + ), + skips=( + # RuntimeError: !lhs.isAliasOf(rhs) INTERNAL ASSERT FAILED at + # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":103, please report a bug to PyTorch. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # RuntimeError: UNSUPPORTED DTYPE: complex + DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', + 'test_nnc_correctness', dtypes=(torch.complex64, torch.complex128)), + # AssertionError: Tensor-likes are not close! + # break slow tests + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_compare_cpu'), + ), + supports_expanded_weight=True, + supports_out=False,), + OpInfo('nn.functional.group_norm', + aten_name='group_norm', + aliases=('group_norm',), + ref=reference_group_norm, + dtypes=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + error_inputs_func=error_inputs_group_norm, + decorators=[ + # RuntimeError: Cannot insert a Tensor that requires grad as a constant. + # Consider making it a parameter or input, or detaching the gradient + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=5e-05, rtol=3e-03)}), + "TestDecomp", + "test_comprehensive", + device_type="cpu" + ), + ], + sample_inputs_func=sample_inputs_group_norm, + reference_inputs_func=reference_inputs_group_norm, + supports_expanded_weight=True,), + OpInfo('nn.functional.instance_norm', + # no ref because instance_norm will often have numerical instability (large numbers or nan) + dtypes=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + allow_cow_input_materialize_forward=['running_mean', 'running_var'], + decorators=[ + # RuntimeError: Cannot insert a Tensor that requires grad as a constant. 
+ # Consider making it a parameter or input, or detaching the gradient + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), + ], + sample_inputs_func=sample_inputs_instance_norm, + supports_expanded_weight=True,), + OpInfo('nn.functional.layer_norm', + aten_name='layer_norm', + aten_backward_name='layer_norm_backward', + aliases=('layer_norm',), + ref=reference_layer_norm, + dtypes=floating_types_and(torch.half, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_jit_shape_analysis=True, + decorators=[ + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1e-05, rtol=1e-03)}), + 'TestCommon', 'test_numpy_refs' + ), + DecorateInfo(unittest.skip("Bug in MPS backend!"), 'TestCommon', 'test_numpy_ref_mps'), + ], + sample_inputs_func=sample_inputs_layer_norm, + supports_expanded_weight=True,), + OpInfo('nn.functional.rms_norm', + aten_name='rms_norm', + aliases=('rms_norm',), + ref=reference_rms_norm, + dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_rms_norm, + error_inputs_func=error_inputs_rms_norm,), + OpInfo('nn.functional.local_response_norm', + dtypes=floating_types_and(torch.int64, torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=[ + # RuntimeError: falseINTERNAL ASSERT FAILED at + # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, please report a bug to PyTorch. 
+ DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), + ], + sample_inputs_func=sample_inputs_local_response_norm,), + OpInfo('constant_pad_nd', + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half), + sample_inputs_func=sample_inputs_constant_pad_nd, + supports_out=False, + skips=( + # bool can't be passed to Scalar arguments in JIT tracer because + # BoolType is not a subtype of ScalarType. + DecorateInfo( + unittest.expectedFailure, 'TestNNCOpInfo', + 'test_nnc_correctness', dtypes=(torch.bool,)), + )), + OpInfo('nn.functional.pad', + variant_test_name='constant', + aten_name='constant_pad_nd', + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half), + sample_inputs_func=partial(sample_inputs_nn_pad, mode='constant'), + supports_out=False), + OpInfo('nn.functional.pad', + variant_test_name='reflect', + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + dtypes=all_types_and_complex_and(torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.half, torch.bfloat16), + sample_inputs_func=partial(sample_inputs_nn_pad, mode='reflect'), + skips=( + # Doesn't have a corresponding aten operator. + # RuntimeError: falseINTERNAL ASSERT FAILED at + # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, please report a bug to PyTorch. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), + ), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + supports_out=False), + OpInfo('nn.functional.pad', + variant_test_name='replicate', + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + dtypes=all_types_and_complex_and(torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.half, torch.bfloat16), + sample_inputs_func=partial(sample_inputs_nn_pad, mode='replicate'), + skips=( + # Doesn't have a corresponding aten operator. + # RuntimeError: falseINTERNAL ASSERT FAILED at + # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, please report a bug to PyTorch. + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), + ), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + supports_out=False), + OpInfo('nn.functional.pad', + variant_test_name='replicate_negative', + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + dtypes=all_types_and_complex_and(torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_nn_pad_replicate_negative, + skips=( + # Doesn't have a corresponding aten operator. + # RuntimeError: falseINTERNAL ASSERT FAILED at + # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, please report a bug to PyTorch. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), + # Some negative padding cases cause a segfault on MPS + DecorateInfo(unittest.skip("Not fully supported on MPS"), 'TestConsistency'), + ), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + supports_out=False), + OpInfo('nn.functional.pad', + variant_test_name='circular', + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half), + sample_inputs_func=partial(sample_inputs_nn_pad, mode='circular'), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_grad=False, + # https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + skips=( + # Doesn't have a corresponding aten operator. + # RuntimeError: falseINTERNAL ASSERT FAILED at + # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, please report a bug to PyTorch. + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), + # Difference from is larger with decomposition new_empty_strided.default than original on output 0 + DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), 'TestDecomp', 'test_comprehensive'), + ), + supports_out=False), + OpInfo('nn.functional.hardswish', + aten_name="hardswish", + aten_backward_name='hardswish_backward', + supports_autograd=True, + assert_autodiffed=True, + sample_inputs_func=sample_inputs_hardswish, + dtypes=floating_types_and(torch.bfloat16, torch.half), + supports_gradgrad=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + autodiff_nonfusible_nodes=["aten::hardswish"]), + OpInfo('nn.functional.unfold', + aten_name='im2col', + dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16, torch.bool), + dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16, torch.bool), + sample_inputs_func=sample_inputs_nn_unfold, + # Runs very slowly on slow gradcheck - 
alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + skips=( + # NOTE: this failure may not reproduce consistently on different systems + # false INTERNAL ASSERT FAILED at "...torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185 + DecorateInfo(unittest.skip("Internal assert failed!"), 'TestJit', 'test_variant_consistency_jit'), + )), + OpInfo('nn.functional.interpolate', + aten_name="interpolate", + variant_test_name='nearest', + supports_autograd=True, + supports_fwgrad_bwgrad=True, + supports_forward_ad=True, + dtypes=floating_types_and(torch.uint8, torch.half, torch.bfloat16), + sample_inputs_func=partial(sample_inputs_interpolate, 'nearest'), + skips=( + # RuntimeError: false + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, + # please report a bug to PyTorch. + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + ), + supports_out=False), + OpInfo('nn.functional.interpolate', + aten_name="interpolate", + variant_test_name='nearest-exact', + supports_autograd=True, + supports_fwgrad_bwgrad=True, + supports_forward_ad=True, + dtypes=floating_types_and(torch.half, torch.bfloat16, torch.uint8), + sample_inputs_func=partial(sample_inputs_interpolate, 'nearest-exact'), + skips=( + # RuntimeError: false + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, + # please report a bug to PyTorch. 
+ DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # RuntimeError: aten::_upsample_nearest_exact*d hit the vmap fallback which is currently disabled + DecorateInfo(unittest.expectedFailure, 'TestOperators', 'test_vmapjvpall_has_batch_rule'), + DecorateInfo(unittest.expectedFailure, 'TestOperators', 'test_vmapvjp_has_batch_rule'), + DecorateInfo(unittest.expectedFailure, 'TestVmapOperatorsOpInfo', 'test_op_has_batch_rule'), + # NotImplementedError: The operator 'aten::_upsample_nearest_exact3d.out' is not currently implemented + # for the MPS device. + DecorateInfo(unittest.expectedFailure, 'TestConsistency'), + ), + supports_out=False), + OpInfo('nn.functional.interpolate', + aten_name="interpolate", + variant_test_name='linear', + supports_autograd=True, + supports_fwgrad_bwgrad=True, + supports_forward_ad=True, + dtypes=floating_types_and(torch.half, torch.bfloat16), + sample_inputs_func=partial(sample_inputs_interpolate, 'linear'), + skips=( + # RuntimeError: false + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, + # please report a bug to PyTorch. + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + ), + supports_out=False), + OpInfo('nn.functional.interpolate', + aten_name="interpolate", + variant_test_name='bilinear', + supports_fwgrad_bwgrad=True, + supports_autograd=True, + supports_forward_ad=True, + dtypes=floating_types_and(torch.uint8, torch.half, torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + sample_inputs_func=partial(sample_inputs_interpolate, 'bilinear'), + reference_inputs_func=partial(reference_inputs_interpolate, 'bilinear'), + skips=( + # RuntimeError: false + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, + # please report a bug to PyTorch. 
+ DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + ), + supports_out=False), + OpInfo('nn.functional.interpolate', + aten_name="interpolate", + variant_test_name='bicubic', + supports_autograd=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + dtypes=floating_types_and(torch.uint8, torch.half, torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), + sample_inputs_func=partial(sample_inputs_interpolate, 'bicubic'), + reference_inputs_func=partial(reference_inputs_interpolate, 'bicubic'), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + skips=( + # RuntimeError: false + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, + # please report a bug to PyTorch. + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + ), + supports_out=False), + OpInfo('nn.functional.interpolate', + aten_name="interpolate", + variant_test_name='trilinear', + supports_autograd=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + dtypes=floating_types_and(torch.half, torch.bfloat16), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + sample_inputs_func=partial(sample_inputs_interpolate, 'trilinear'), + skips=( + # RuntimeError: false + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, + # please report a bug to PyTorch. 
+ DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + ), + supports_out=False), + OpInfo('nn.functional.interpolate', + aten_name="interpolate", + variant_test_name='area', + supports_autograd=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + dtypes=floating_types_and(torch.half, torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), + sample_inputs_func=partial(sample_inputs_interpolate, 'area'), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + skips=( + # RuntimeError: false + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, + # please report a bug to PyTorch. + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + ), + supports_out=False), + OpInfo('nn.functional.upsample_bilinear', + supports_autograd=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + dtypes=floating_types_and(torch.uint8, torch.half, torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + sample_inputs_func=partial(sample_inputs_upsample, 'bilinear'), + reference_inputs_func=partial(reference_inputs_upsample, 'bilinear'), + skips=( + # RuntimeError: false + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, + # please report a bug to PyTorch. 
+ DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + ), + supports_out=False), + OpInfo('_upsample_bilinear2d_aa', + op=torch.ops.aten._upsample_bilinear2d_aa, + aten_name='_upsample_bilinear2d_aa', + supports_autograd=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + dtypes=floating_types_and(torch.uint8), + dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + sample_inputs_func=partial(sample_inputs_upsample_aa, 'bilinear'), + supports_out=False, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.expectedFailure, 'TestDTensorOps', 'test_dtensor_op_db'), + DecorateInfo(unittest.expectedFailure, 'TestEagerFusionOpInfo', 'test_aot_autograd_symbolic_exhaustive'), + DecorateInfo(unittest.expectedFailure, 'TestInductorOpInfo', 'test_comprehensive'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + )), + OpInfo( + "nn.functional.soft_margin_loss", + dtypes=floating_types_and(torch.half, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + # doesn't support grad on target + sample_inputs_func=partial(sample_inputs_loss, rhs_requires_grad=False), + error_inputs_func=error_inputs_soft_margin_loss, + ), + OpInfo('nn.functional.upsample_nearest', + supports_autograd=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + dtypes=floating_types_and(torch.uint8, torch.half, torch.bfloat16), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + sample_inputs_func=partial(sample_inputs_upsample, 'nearest'), + skips=( + # RuntimeError: false + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, + # please report a bug to PyTorch. 
+ DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + ), + supports_out=False), + OpInfo( + "nn.functional.margin_ranking_loss", + dtypes=all_types_and(torch.half, torch.bfloat16), + supports_out=False, + sample_inputs_func=sample_inputs_margin_ranking_loss, + error_inputs_func=error_inputs_margin_ranking_loss, + reference_inputs_func=reference_inputs_margin_ranking_loss, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True), + OpInfo( + "nn.functional.multi_margin_loss", + dtypes=floating_types(), + dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16), + supports_out=False, + supports_gradgrad=False, + sample_inputs_func=sample_inputs_multi_margin_loss, + reference_inputs_func=reference_inputs_multi_margin_loss, + error_inputs_func=error_inputs_multi_margin_loss, + decorators=( + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1e-4, rtol=1e-4)}), + "TestJit", + "test_variant_consistency_jit", + ), + ), + ), + OpInfo( + "nn.functional.multilabel_margin_loss", + dtypes=floating_types(), + dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16), + supports_out=False, + supports_gradgrad=False, + sample_inputs_func=sample_inputs_multilabel_margin_loss, + reference_inputs_func=reference_inputs_multilabel_margin_loss, + error_inputs_func=error_inputs_multilabel_margin_loss, + ), + OpInfo('nn.functional.leaky_relu', + aliases=None, + aten_name="leaky_relu", + aten_backward_name='leaky_relu_backward', + sample_inputs_func=sample_inputs_leaky_relu, + dtypes=floating_types_and(torch.bfloat16, torch.float16), + inplace_variant=lambda x, negative_slope=0.01: + torch.nn.functional.leaky_relu(x, negative_slope, inplace=True), + supports_autograd=True, + assert_autodiffed=True, + supports_gradgrad=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + autodiff_nonfusible_nodes=["aten::leaky_relu"]), + OpInfo( + "nn.functional.multilabel_soft_margin_loss", + supports_out=False, + 
dtypes=floating_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_multilabel_soft_margin_loss, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=( + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1e-4, rtol=1e-4)}), + "TestJit", + "test_variant_consistency_jit", + ), + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=4e-3, rtol=1.3e-3)}), + "TestInductorOpInfo", + "test_comprehensive", + device_type="cuda" + ), + ), + skips=( + # AssertionError: False is not true : Scalars failed to compare as equal! 0 != 4096 + # __main__.TestJitCUDA.test_variant_consistency_jit_nn_functional_multilabel_soft_margin_loss_cuda_float32 + # leaked 4096 bytes CUDA memory on device 0 + DecorateInfo( + # Skip instead of expectedFailure because this fails + # locally for me but passes in CI. + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + device_type="cuda", + ), + ), + ), + OpInfo('nn.functional.avg_pool2d', + aten_name='avg_pool2d', + supports_autograd=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + dtypes=floating_types_and(torch.int64, torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + error_inputs_func=error_inputs_avg_pool2d, + sample_inputs_func=sample_inputs_avgpool2d, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out', device_type='cuda'), + )), + OpInfo('nn.functional.fractional_max_pool2d', + supports_autograd=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + op=lambda input, *args, **kwargs: + wrapper_set_seed(torch.nn.functional.fractional_max_pool2d, input, *args, **kwargs), + # vmap does not support random operations + check_batched_forward_grad=False, + dtypes=floating_types_and(torch.bfloat16, torch.float16), + test_neg_view=False, + sample_inputs_func=sample_inputs_fractional_max_pool2d, + decorators=( + # FIXME: AssertionError: False is not 
true : Tensors failed to compare as equal! + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # RuntimeError: input->type()->kind() == TypeKind::OptionalType + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270 + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit')), + skips=( + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'),)), + OpInfo('nn.functional.fractional_max_pool3d', + supports_autograd=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + op=lambda input, *args, **kwargs: + wrapper_set_seed(torch.nn.functional.fractional_max_pool3d, input, *args, **kwargs), + # vmap does not support random operations + check_batched_forward_grad=False, + dtypes=floating_types_and(torch.bfloat16, torch.float16), + test_neg_view=False, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + sample_inputs_func=sample_inputs_fractional_max_pool3d, + decorators=( + # FIXME: both derivatives are implemented incorrectly + # https://github.com/pytorch/pytorch/issues/69322 + # FIXME: AssertionError: False is not true : Tensors failed to compare as equal! 
+ DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # RuntimeError: input->type()->kind() == TypeKind::OptionalType + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270 + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit')), + skips=( + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'),)), + OpInfo('nn.functional.max_pool1d', + aten_name='max_pool1d', + supports_autograd=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # got: Batching rule not implemented for aten::flatten.using_ints + check_batched_forward_grad=False, + # TODO: add shape checks + assert_jit_shape_analysis=False, + dtypes=floating_types_and(torch.bfloat16, torch.float16), + dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + skips=( + # Pre-existing condition; Needs to be fixed + DecorateInfo(unittest.skip("Works on some configs"), 'TestNNCOpInfo', + 'test_nnc_correctness', dtypes=(torch.bfloat16,)), + # RuntimeError: The tensor has a non-zero number of elements, but its data is not allocated yet. 
+ # Caffe2 uses a lazy allocation, so you will need to call mutable_data() or raw_mutable_data() + # to actually allocate memory + DecorateInfo(unittest.skip("Skipped!"), 'TestTags', 'test_tags'), + ), + error_inputs_func=error_inputs_max_pool1d, + sample_inputs_func=sample_inputs_max_pool), + OpInfo('nn.functional.max_pool2d', + aten_name='max_pool2d', + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + # Vmap is not happy with non-contiguous (channels_last) inputs + check_batched_gradgrad=False, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # got: Batching rule not implemented for aten::flatten.using_ints + check_batched_forward_grad=False, + assert_jit_shape_analysis=True, + dtypes=all_types_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + error_inputs_func=error_inputs_max_pool2d, + sample_inputs_func=sample_inputs_max_pool), + OpInfo('max_pool2d_with_indices_backward', + op=max_pool2d_backward, + # We've defined a custom op, so there's no corresponding aten op + aten_name=None, + method_variant=None, + inplace_variant=None, + operator_variant=None, + inplace_operator_variant=None, + check_batched_gradgrad=False, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_forward_grad=False, + assert_jit_shape_analysis=False, + dtypes=floating_types_and(torch.bfloat16, torch.float16), + sample_inputs_func=sample_inputs_max_pool, + skips=( + # We've defined a custom op here, and we don't handle the case where we receive an out kwarg + DecorateInfo(unittest.skip("Skipped!"), "TestCommon", "test_out"), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + # FX failed to normalize op - add the op to the op_skip list. 
+ DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # object has no attribute max_pool2d_with_indices_backward (It's not available on torch -- so expected) + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit') + )), + OpInfo('nn.functional.max_pool3d', + aten_name='max_pool3d', + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # got: Batching rule not implemented for aten::flatten.using_ints + check_batched_forward_grad=False, + # TODO: add shape checks + assert_jit_shape_analysis=False, + dtypes=all_types_and(torch.bfloat16, torch.float16), + dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + # TODO: investigate nondeterminism + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + error_inputs_func=error_inputs_max_pool3d, + sample_inputs_func=sample_inputs_max_pool), + OpInfo('nn.functional.max_unpool1d', + aten_name='max_unpool1d', + supports_autograd=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + assert_jit_shape_analysis=False, + dtypes=floating_types_and(torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_max_unpool, + skips=( + # Gradients are tested in `variant_test_name=grad` below. + # We skip tests here because there is non-determinism in backward + # with gather, when there are writes into the same memory location, + # and if there are several indices pointing to the same memory, + # gradcheck is oblivious about that and cannot perturb them all at once + # (see sample_inputs_max_unpool_grad to find out more). 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_grad'), + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_gradgrad'), + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_forward_mode_AD', + active_if=(not IS_MACOS)), + DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_forward_ad', + device_type='cpu'), + )), + OpInfo('nn.functional.max_unpool1d', + variant_test_name='grad', + aten_name='max_unpool1d', + supports_autograd=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + assert_jit_shape_analysis=False, + dtypes=floating_types_and(torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_max_unpool_grad), + OpInfo('nn.functional.max_unpool2d', + aten_name='max_unpool2d', + supports_autograd=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + assert_jit_shape_analysis=False, + dtypes=floating_types_and(torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_max_unpool, + skips=( + # Gradients are tested in `variant_test_name=grad` below. + # We skip tests here because there is non-determinism in backward + # with gather, when there are writes into the same memory location, + # and if there are several indices pointing to the same memory, + # gradcheck is oblivious about that and cannot perturb them all at once + # (see sample_inputs_max_unpool_grad to find out more). 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_forward_mode_AD', + active_if=(not IS_MACOS)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_gradgrad'), + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_grad'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_forward_ad'), + )), + OpInfo('nn.functional.max_unpool2d', + variant_test_name='grad', + aten_name='max_unpool2d', + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # Vmap is not happy with non-contiguous (channels_last) inputs + check_batched_grad=False, + supports_out=False, + assert_jit_shape_analysis=False, + dtypes=floating_types_and(torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_max_unpool_grad), + OpInfo('nn.functional.max_unpool3d', + aten_name='max_unpool3d', + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + assert_jit_shape_analysis=False, + dtypes=floating_types_and(torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_max_unpool, + skips=( + # Gradients are tested in `variant_test_name=grad` below. + # We skip tests here because there is non-determinism in backward + # with gather, when there are writes into the same memory location, + # and if there are several indices pointing to the same memory, + # gradcheck is oblivious about that and cannot perturb them all at once + # (see sample_inputs_max_unpool_grad to find out more). 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_forward_mode_AD', + active_if=(not IS_MACOS)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_gradgrad'), + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_grad'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_forward_ad'), + )), + OpInfo('nn.functional.max_unpool3d', + variant_test_name='grad', + aten_name='max_unpool3d', + supports_autograd=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + assert_jit_shape_analysis=False, + dtypes=floating_types_and(torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_max_unpool_grad), + OpInfo('nn.functional.linear', + aten_name='linear', + supports_autograd=True, + supports_gradgrad=True, + sample_inputs_func=sample_inputs_linear, + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + dtypesIfROCM=floating_and_complex_types_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16), + backward_dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16), + # linear calls mm under the hood which is nondeterministic on CUDA + # https://pytorch.org/docs/stable/generated/torch.use_deterministic_algorithms.html#torch.use_deterministic_algorithms + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + supports_expanded_weight=True, + decorators=( + # Strides are not the same! 
+ DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + )), + OpInfo('nn.functional.bilinear', + aten_name='bilinear', + supports_autograd=True, + sample_inputs_func=sample_inputs_bilinear, + dtypes=all_types_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.float16, + *[torch.bfloat16] if SM53OrLater or TEST_WITH_ROCM else []), + decorators=( + DecorateInfo(toleranceOverride({torch.float16: tol(atol=2e-03, rtol=1.3e-03)}), + 'TestInductorOpInfo', 'test_comprehensive', device_type='cpu'), + ), + skips=( + # NVIDIA only assures that bfloat16 is supported by bmm if SM >= 5.3 + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes', device_type='cuda', active_if=not SM53OrLater), + DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness', dtypes=(torch.bfloat16,)), + ), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False), + OpInfo('nn.functional.glu', + aten_name='glu', + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + sample_inputs_func=sample_inputs_glu, + dtypes=floating_types_and(torch.bfloat16, torch.float16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False), + UnaryUfuncInfo( + 'nn.functional.elu', + aten_backward_name='elu_backward', + ref=lambda x, alpha=1.0, inplace=False: + np.maximum(0., x) + np.minimum(0., alpha * (np.exp(x) - 1)), + dtypes=floating_types_and(torch.bfloat16, torch.float16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_autograd=True, + assert_autodiffed=False, + supports_gradgrad=True, + supports_out=False, + sample_kwargs=lambda device, dtype, input: + ({'alpha': 0.8}, {'alpha': 0.8}), + inplace_variant=lambda x, alpha=1.0: + torch.nn.functional.elu(x, alpha, inplace=True), + decorators=[ + DecorateInfo( + toleranceOverride({ + 
torch.float16: tol(atol=1e-03, rtol=1.2e-03), + torch.bfloat16: tol(atol=1e-03, rtol=1.2e-03) + }), + 'TestUnaryUfuncs', device_type='cuda', + ), ], + ), + # Marked as a Unary function because it has some rather odd broadcasting semantics in its + # second argument + UnaryUfuncInfo( + 'nn.functional.prelu', + aten_backward_name='_prelu_kernel_backward', + ref=lambda x, weight: + np.maximum(0., x) + np.minimum(0., x) * + (weight if x.ndim == 1 else weight.reshape([weight.size if i == 1 else 1 for i in range(0, x.ndim)])), + dtypes=floating_types_and(torch.bfloat16, torch.float16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_autograd=True, + assert_autodiffed=False, + supports_gradgrad=True, + supports_out=False, + # test_reference_numerics only tests the case when the weight tensor is a scalar + sample_kwargs=sample_kwargs_prelu_scalar_weight, + error_inputs_func=error_inputs_prelu, + sample_inputs_func=sample_inputs_prelu, + reference_inputs_func=reference_inputs_prelu, + decorators=[ + # RuntimeError: Cannot insert a Tensor that requires grad as a constant. 
+ # Consider making it a parameter or input, or detaching the gradient + # https://github.com/pytorch/pytorch/issues/68752 + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), ], + ), + UnaryUfuncInfo( + 'nn.functional.celu', + ref=lambda x, alpha=1.0, inplace=False: + np.maximum(0., x) + np.minimum(0., alpha * (np.exp(x / alpha) - 1)), + dtypes=floating_types_and(torch.bfloat16, torch.float16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_autograd=True, + assert_autodiffed=False, + supports_gradgrad=True, + supports_out=False, + sample_kwargs=lambda device, dtype, input: + ({'alpha': 0.8}, {'alpha': 0.8}), + inplace_variant=lambda x, alpha=1.0: + torch.nn.functional.celu(x, alpha, inplace=True), + decorators=[ + DecorateInfo( + toleranceOverride({ + torch.float16: tol(atol=1e-03, rtol=1.2e-03), + torch.bfloat16: tol(atol=1e-03, rtol=1.2e-03) + }), + 'TestUnaryUfuncs', device_type='cuda', + ), ], + ), + UnaryUfuncInfo( + 'nn.functional.rrelu', + aten_backward_name='rrelu_with_noise_backward', + op=lambda input, *args, **kwargs: + wrapper_set_seed(torch.nn.functional.rrelu, input, *args, **kwargs), + inplace_variant=lambda input, *args, **kwargs: + wrapper_set_seed(torch.nn.functional.rrelu, input, *args, inplace=True, **kwargs), + dtypes=floating_types_and(torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + gradcheck_wrapper=wrapper_set_seed, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + sample_kwargs=lambda device, dtype, input: + (dict(lower=0., upper=1., training=True), dict(lower=0., upper=1., training=True)), + sample_inputs_func=sample_inputs_rrelu, + error_inputs_func=error_inputs_rrelu, + decorators=( + DecorateInfo( + toleranceOverride({ + torch.float16: tol(atol=1e-03, rtol=1.2e-03), + torch.bfloat16: tol(atol=1e-03, rtol=1.2e-03) + }), + 'TestUnaryUfuncs', device_type='cuda', + ),), + skips=( + # lambda impl + 
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # lambda impl + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # In-place operations do not play well with forward AD + # https://github.com/pytorch/pytorch/issues/77447 + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', + 'test_inplace_forward_mode_AD'), + # The noise vector that's generated in these tests is not the same elementwise + DecorateInfo(unittest.skip("Different noise"), 'TestUnaryUfuncs', 'test_batch_vs_slicing'), + DecorateInfo(unittest.skip("Different noise"), 'TestUnaryUfuncs', 'test_contig_vs_every_other'), + DecorateInfo(unittest.skip("Different noise"), 'TestUnaryUfuncs', 'test_non_contig_expand'), + DecorateInfo(unittest.skip("Different noise"), 'TestUnaryUfuncs', 'test_contig_vs_transposed'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'))), + UnaryUfuncInfo( + 'nn.functional.selu', + ref=lambda x, inplace=False: + 1.0507009873554804934193349852946 * ( + np.maximum(0., x) + np.minimum(0., 1.6732632423543772848170429916717 * (np.exp(x) - 1)) + ), + dtypes=floating_types_and(torch.bfloat16, torch.float16), + supports_forward_ad=True, # depends on 'elu' + supports_fwgrad_bwgrad=True, + supports_autograd=True, + assert_autodiffed=False, + supports_gradgrad=True, + supports_out=False, + inplace_variant=lambda x: torch.nn.functional.selu(x, inplace=True), + decorators=[ + DecorateInfo( + toleranceOverride({ + torch.float16: tol(atol=1e-2, rtol=1.8e-2), + torch.bfloat16: tol(atol=1e-2, rtol=1.8e-2) + }), + 'TestUnaryUfuncs', device_type='cuda', + ), ], + ), + OpInfo( + 'torch._scaled_mm', + sample_inputs_func=sample_inputs_scaled_mm, + dtypes=empty_types(), + dtypesIfCUDA=empty_types() + (torch.float8_e4m3fn,), + supports_out=True, + supports_forward_ad=False, + supports_autograd=False, + decorators=[skipCUDAIf(not SM90OrLater or TEST_WITH_ROCM, 
'Requires CUDA SM >= 9.0')], + skips=( + # Sample inputs isn't really parametrized on dtype + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes', + device_type='cuda'), + # "mul_cuda" not implemented for float8_e4m3fn + # https://github.com/pytorch/pytorch/issues/107256 + DecorateInfo(unittest.skip("Skipped!"), 'TestSchemaCheckModeOpInfo', 'test_schema_correctness', + dtypes=(torch.float8_e4m3fn,)), + ) + ), + OpInfo( + 'torch.ops.aten._safe_softmax.default', + dtypes=all_types_and(torch.half, torch.bfloat16, torch.bool), + sample_inputs_func=sample_inputs_safe_softmax, + assert_jit_shape_analysis=True, + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + supports_cow_input_no_materialize_backward=False, + decorators=[], + skips=( + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + ), + ), + OpInfo( + 'nn.functional.scaled_dot_product_attention', + op=lambda *args, **kwargs: + wrapper_set_seed(torch.nn.functional.scaled_dot_product_attention, *args, **kwargs), + sample_inputs_func=sample_inputs_scaled_dot_product_attention, + dtypes=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=False, + supports_fwgrad_bwgrad=True, + check_batched_forward_grad=False, + decorators=[DecorateInfo(toleranceOverride( + {torch.float32: tol(atol=5e-05, rtol=5e-6)}), 'TestCommon',), ], + skips=( + # When attn mask is a composite tensor this fails backward by returning a none + DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_backward', device_type='cuda'), + # This is only failing on Linux Bionic 3.10 Cuda 11.6 + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes', + device_type='cuda', active_if=_get_torch_cuda_version() >= (11, 6)), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples', + dtypes=(torch.float32,)), + # AssertionError: JIT Test does not execute any 
logic + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # Forward works for dtype=float64 which is the math path + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_forward_mode_AD'), + # Not implemented for Forward AD + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_fn_fwgrad_bwgrad', + device_type='cpu'), + # Not implemented for backward derivative + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_gradgrad', + device_type='cpu'), + # CPU and CUDA have inconsistencies for intermediate outputs + DecorateInfo(unittest.skip("Skipped!"), 'TestMeta', 'test_dispatch_meta_outplace', + device_type='cpu'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMeta', 'test_dispatch_symbolic_meta_outplace', + device_type='cpu'), + # When changing input from Tensor to CompositeCompliantTensor, input.requires_grad() changes from true to false + DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_backward', + device_type='cpu'), + # OpInfo was implemented with a lambda + DecorateInfo(unittest.skip("Skipped!"), 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # TODO Need to understand what this is testing and why it doesn't work + DecorateInfo(unittest.skip("Skipped"), 'TestDecomp', 'test_comprehensive'), + DecorateInfo(unittest.skip('output is non-deterministic (when dropout_p > 0)'), 'TestCommon', 'test_compare_cpu'), + # TODO skip this for now since we can't skip on runtime arch support + DecorateInfo(unittest.skip('This is '), 'TestInductorOpInfo', 'test_comprehensive'), + # skip for sm < 80 + DecorateInfo(unittest.skip("Skipped!"), 'TestSchemaCheckModeOpInfo', 'test_schema_correctness', + device_type='cuda', dtypes=(torch.bfloat16,), active_if=not SM80OrLater), + # FIXME + DecorateInfo(unittest.skip('test_cow_input does not work with efficient attention on ROCM'), + 'TestCompositeCompliance', 'test_cow_input', + device_type='cuda', 
dtypes=(torch.bfloat16, torch.float16, torch.float32), + active_if=TEST_WITH_ROCM and PLATFORM_SUPPORTS_MEM_EFF_ATTENTION), + DecorateInfo(unittest.skip('test_fake_crossref_backward_amp does not work with efficient attention on ROCM'), + 'TestFakeTensor', 'test_fake_crossref_backward_amp', + device_type='cuda', dtypes=(torch.bfloat16, torch.float16, torch.float32), + active_if=TEST_WITH_ROCM and PLATFORM_SUPPORTS_MEM_EFF_ATTENTION), + DecorateInfo(unittest.skip('test_fake_crossref_backward_no_amp does not work with efficient attention on ROCM'), + 'TestFakeTensor', 'test_fake_crossref_backward_no_amp', + device_type='cuda', dtypes=(torch.bfloat16, torch.float16, torch.float32), + active_if=TEST_WITH_ROCM and PLATFORM_SUPPORTS_MEM_EFF_ATTENTION), + # for element 1, was torch.Size([4, 4, 0]) but real shape was torch.Size([16, 3, 0]) + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace", device_type="cuda", + dtypes=[torch.float16, torch.bfloat16, torch.float32], + active_if=TEST_WITH_ROCM and PLATFORM_SUPPORTS_FLASH_ATTENTION), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace", device_type="cuda", + dtypes=[torch.float16, torch.bfloat16, torch.float32], + active_if=TEST_WITH_ROCM and PLATFORM_SUPPORTS_FLASH_ATTENTION), + # for element 1, was torch.Size([4, 4, 11]) but real shape was torch.Size([16, 11]) + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides", + device_type="cuda", dtypes=[torch.float32], + active_if=TEST_WITH_ROCM and PLATFORM_SUPPORTS_FLASH_ATTENTION),), + ), + OpInfo( + 'torch.ops.aten._flash_attention_forward', + sample_inputs_func=sample_inputs_flash_attention_forward, + dtypes=empty_types(), + dtypesIfCUDA=custom_types(torch.float16) + if not SM80OrLater + else custom_types(torch.float16, torch.bfloat16), + supports_out=False, + supports_autograd=True, + supports_fwgrad_bwgrad=False, + supports_forward_ad=False, + 
check_batched_forward_grad=False, + decorators=[skipCUDAIf(not PLATFORM_SUPPORTS_FLASH_ATTENTION, "This platform doesn't support Flash Attention")], + skips=( + # for element 1, was torch.Size([4, 4, 11]) but real shape was torch.Size([16, 11]) + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace", device_type="cuda", + dtypes=[torch.float16, torch.bfloat16], active_if=TEST_WITH_ROCM and PLATFORM_SUPPORTS_FLASH_ATTENTION), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace", device_type="cuda", + dtypes=[torch.float16, torch.bfloat16], active_if=TEST_WITH_ROCM and PLATFORM_SUPPORTS_FLASH_ATTENTION), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace", device_type="cuda", + dtypes=[torch.float16, torch.bfloat16], active_if=TEST_WITH_ROCM and PLATFORM_SUPPORTS_FLASH_ATTENTION), + # Checking the scalar value of the philox seed and offset + # Checking the scalar value of the philox seed and offset + DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_operator', device_type='cuda'), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_noncontiguous_samples', device_type='cuda'), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', device_type='cuda'), + # None Mismatch Tensor + DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward', device_type='cuda'), + ) + ), + OpInfo( + 'torch.ops.aten._efficient_attention_forward', + sample_inputs_func=sample_inputs_efficient_attention_forward, + dtypes=empty_types(), + dtypesIfCUDA=custom_types(torch.float16, torch.float32) + if not SM80OrLater + else custom_types(torch.float16, torch.float32, torch.bfloat16), + supports_out=False, + supports_autograd=True, + supports_fwgrad_bwgrad=False, + supports_forward_ad=False, + check_batched_forward_grad=False, + # TODO: Skip because it produces a CUDA illegal memory access for some reason + 
skip_cow_input_backward=True, + # FIXME: mask_type == 2 (LowerRight) + decorators=[ + skipCUDAIf(not PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, "This platform doesn't support efficient attention"), + skipCUDAIf(TEST_WITH_ROCM, "Efficient attention on ROCM doesn't support custom_mask_type==2")], + skips=( + # for element 1, was torch.Size([4, 4, 11]) but real shape was torch.Size([16, 11]) + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace", device_type="cuda", + dtypes=[torch.float16, torch.bfloat16, torch.float32], + active_if=TEST_WITH_ROCM and PLATFORM_SUPPORTS_FLASH_ATTENTION), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace", device_type="cuda", + dtypes=[torch.float16, torch.bfloat16], active_if=TEST_WITH_ROCM and PLATFORM_SUPPORTS_FLASH_ATTENTION), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace", device_type="cuda", + dtypes=[torch.float16, torch.bfloat16], active_if=TEST_WITH_ROCM and PLATFORM_SUPPORTS_FLASH_ATTENTION), + # Checking the scaler value of the philox seed and offset + DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_operator', device_type='cuda'), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_noncontiguous_samples', device_type='cuda'), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', device_type='cuda'), + # None Mismatch Tensor + DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward', device_type='cuda'), + ) + ), + UnaryUfuncInfo( + 'nn.functional.silu', + aten_backward_name='silu_backward', + ref=lambda x, inplace=False: x / (1 + np.exp(-x)), + dtypes=floating_types_and(torch.bfloat16, torch.float16), + supports_forward_ad=True, + supports_autograd=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=True, + supports_out=False, + inplace_variant=lambda x: torch.nn.functional.silu(x, inplace=True), + decorators=[ + DecorateInfo( + 
toleranceOverride({ + torch.float16: tol(atol=1e-3, rtol=1e-3), + torch.bfloat16: tol(atol=1e-4, rtol=1e-4) + }), + 'TestUnaryUfuncs', device_type='cuda', + ), ], + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal', + dtypes=(torch.cfloat,), device_type='cpu'), + ), + autodiff_nonfusible_nodes=["aten::silu"], + ), + # TODO: combine this with the nn.functional.silu OpInfo when + # complex autodiff for silu is supported or when + # the forward bug is fixed + # Note: silu errors when given inputs that require grad + # but it doesn't support grad in their dtype + # This is why the dtypes list above passes test_dtypes, + # because it's getting lucky and failing in forward + # because test_dtypes sets requires_grad to True + # THIS IS A BUG + UnaryUfuncInfo( + 'nn.functional.silu', + variant_test_name='complex', + ref=lambda x, inplace=False: + x / (1 + np.exp(-x)), + dtypes=complex_types(), + dtypesIfCUDA=complex_types(), + supports_forward_ad=False, + supports_autograd=False, + assert_autodiffed=False, + supports_out=False, + inplace_variant=lambda x: torch.nn.functional.silu(x, inplace=True), + decorators=[ + DecorateInfo( + toleranceOverride({ + torch.float16: tol(atol=1e-3, rtol=1e-3), + torch.bfloat16: tol(atol=1e-4, rtol=1e-4) + }), + 'TestUnaryUfuncs', device_type='cuda', + ), ], + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal', + dtypes=(torch.cfloat,)), + # FIXME: intentionally misreports dtypes + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_dtypes'), + # FIXME: numpy reference diverges: Comparing (nan+nanj) and (-0+0j) + DecorateInfo(unittest.skip("Skipped!"), + 'TestUnaryUfuncs', 'test_reference_numerics_large', + dtypes=(torch.complex64, torch.cdouble)), + DecorateInfo(unittest.skip("Skipped!"), + 'TestUnaryUfuncs', 'test_reference_numerics_small', + dtypes=(torch.complex64,)), + DecorateInfo(unittest.skip("Skipped!"), + 'TestUnaryUfuncs', 
'test_reference_numerics_extremal', + dtypes=(torch.complex64,)))), + UnaryUfuncInfo( + 'nn.functional.hardsigmoid', + aten_backward_name='hardsigmoid_backward', + ref=reference_hardsigmoid, + dtypes=floating_types_and(torch.bfloat16, torch.float16), + supports_autograd=True, + assert_autodiffed=False, + supports_gradgrad=False, + supports_forward_ad=True, + supports_out=False, + inplace_variant=partial(torch.nn.functional.hardsigmoid, inplace=True), + decorators=[ + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-04, rtol=0.001)}), 'TestUnaryUfuncs', device_type='cuda',), ], + skips=[ + # still want to test that first derivative works though second derivative isn't supported + DecorateInfo(unittest.expectedFailure, 'TestBwdGradients', "test_inplace_gradgrad"), + # produces 0 instead of nan on ROCM + DecorateInfo(unittest.expectedFailure, + 'TestUnaryUfuncs', "test_reference_numerics_extremal", + device_type='cuda', + active_if=(TEST_WITH_ROCM)), ] + ), + UnaryUfuncInfo( + 'nn.functional.logsigmoid', + aten_name="log_sigmoid", + aten_backward_name='log_sigmoid_backward', + ref=reference_logsigmoid, + dtypes=floating_types_and(torch.half, torch.bfloat16), + supports_autograd=True, + assert_autodiffed=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_gradgrad=True, + # autodiff_nonfusible_nodes=["aten::log_sigmoid"], + decorators=[ + DecorateInfo( + precisionOverride({torch.float16: 1e-2, torch.bfloat16: 5e-3}), + 'TestUnaryUfuncs', 'test_reference_numerics_small'), + DecorateInfo( + precisionOverride({torch.float16: 1e-2, torch.bfloat16: 5e-3}), + 'TestUnaryUfuncs', 'test_reference_numerics_large'), + DecorateInfo( + precisionOverride({torch.float16: 1e-2, torch.bfloat16: 5e-3}), + 'TestUnaryUfuncs', 'test_reference_numerics_extremal'), + ], + skips=( + # Resized a non-empty tensor but did not warn about it. 
+ DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning', device_type='cpu'), + ), + ), + UnaryUfuncInfo( + 'nn.functional.mish', + aten_backward_name='mish_backward', + ref=lambda x: x * np.tanh(reference_softplus(x)), + dtypes=floating_types_and(torch.bfloat16, torch.float16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_autograd=True, + assert_autodiffed=False, + supports_gradgrad=True, + supports_out=False, + inplace_variant=partial(torch.nn.functional.mish, inplace=True), + decorators=[ + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-03)}), 'TestUnaryUfuncs',), ], + ), + UnaryUfuncInfo( + 'nn.functional.softsign', + ref=lambda x: x / (np.abs(x) + 1), + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.float16, torch.bfloat16, torch.bool), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_autograd=True, + assert_autodiffed=False, + supports_gradgrad=True, + supports_out=False, + decorators=[ + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-03, rtol=1.3e-04)}), 'TestUnaryUfuncs',), ], + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', + dtypes=(torch.int, torch.int8)),), + ), + UnaryUfuncInfo( + 'nn.functional.tanhshrink', + ref=lambda x: x - np.tanh(x), + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_autograd=True, + assert_autodiffed=False, + supports_gradgrad=True, + supports_out=False, + decorators=[ + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo( + toleranceOverride({torch.bfloat16: tol(atol=1e-02, rtol=1.6e-02)}), 'TestUnaryUfuncs',), + DecorateInfo(toleranceOverride({torch.complex64: tol(atol=6e-04, rtol=1e-05), + torch.bfloat16: 
tol(atol=1e-02, rtol=1.6e-02)}), + 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cuda'), + ], + skips=( + # in each case, pytorch will produce a nan while numpy will not + DecorateInfo(unittest.skip("Fails on some jobs works on others!"), + 'TestUnaryUfuncs', "test_reference_numerics_large", + dtypes=(torch.complex64, torch.complex128), active_if=(IS_MACOS)), + DecorateInfo(unittest.skip("Fails on some jobs works on others!"), + 'TestUnaryUfuncs', "test_reference_numerics_extremal", + dtypes=(torch.complex64, torch.complex128), device_type='cpu', + active_if=(IS_MACOS or IS_WINDOWS)), + ), + # tan(j * pi/2 * odd_number) is nan which also make tanhshrink nan. + reference_numerics_filter=NumericsFilter( + condition=lambda x: (close_to_int(x / (math.pi * 0.5j)) + if x.is_complex() else x.new_tensor(False, dtype=torch.bool)), + safe_val=0) + ), + UnaryUfuncInfo( + 'nn.functional.threshold', + ref=lambda x, threshold, value: np.where(x <= threshold, value, x).astype(x.dtype), + dtypes=all_types_and(torch.half, torch.bfloat16), + inplace_variant=lambda x, threshold, value: + torch.nn.functional.threshold(x, threshold, value, inplace=True), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=False, + supports_gradgrad=True, + supports_out=False, + sample_kwargs=lambda device, dtype, input: ({'threshold': float.fromhex('0x1.3ap-3'), + 'value': -9}, + {'threshold': float.fromhex('0x1.3ap-3'), + 'value': -9}), + # TODO(whc) should not need sample_inputs_func, but without it + # kwargs aren't being hooked up properly + sample_inputs_func=sample_inputs_threshold, + ), + OpInfo( + "nn.functional.triplet_margin_loss", + sample_inputs_func=sample_inputs_triplet_margin_loss, + error_inputs_func=error_inputs_triplet_margin_loss, + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + ), + OpInfo( + 
"nn.functional.triplet_margin_with_distance_loss", + sample_inputs_func=partial(sample_inputs_triplet_margin_loss, with_distance=True), + error_inputs_func=error_inputs_triplet_margin_loss, + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # This test cannot handle a callable passed to `distance_function`. If we would use + # `distance_function=None`, the test would pass fine. + DecorateInfo( + unittest.expectedFailure, + "TestJit", + "test_variant_consistency_jit", + ), + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + ), + ), + BinaryUfuncInfo('nextafter', + dtypes=floating_types_and(torch.bfloat16, torch.half), + dtypesIfCUDA=floating_types_and(torch.bfloat16), + supports_autograd=False, + supports_rhs_python_scalar=False), + OpInfo( + "to", + op=lambda x, *args, **kwargs: x.to(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16, torch.bool), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + sample_inputs_func=sample_inputs_to, + skips=( + # RuntimeError: undefined value cpu + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + device_type="cpu", + ), + # NotImplementedError: Cannot copy out of meta tensor; no data! 
+ DecorateInfo( + unittest.skip("Skipped!"), + "TestMeta", + "test_meta_outplace", + ), + # https://github.com/pytorch/pytorch/issues/84335 + DecorateInfo( + unittest.skip("Skipped!"), + "TestProxyTensorOpInfo", + "test_make_fx_symbolic_exhaustive", + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + ), + ), + OpInfo('topk', + dtypes=all_types_and(torch.bfloat16, torch.float16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_jit_shape_analysis=True, + sample_inputs_func=sample_inputs_topk), + # Multiple variants for batch_norm to test with and without cuDNN disabled + # See https://github.com/pytorch/pytorch/pull/63218#discussion_r688549391 for more details + OpInfo('nn.functional.batch_norm', + aten_name='batch_norm', + dtypes=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_jit_shape_analysis=True, + allow_cow_input_materialize_forward=[1, 2], + allow_cow_input_materialize_backward=[1, 2], + sample_inputs_func=sample_inputs_batch_norm, + skips=( + # see https://github.com/pytorch/pytorch/issues/71286 + DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness'), + DecorateInfo(unittest.skip('Skipped!'), 'TestNNCOpInfo', 'test_nnc_correctness', + device_type='cpu', dtypes=(torch.bfloat16, torch.float16)), + DecorateInfo(toleranceOverride({torch.float32: tol(atol=5e-05, rtol=1e-05)}), + 'TestCompositeCompliance', 'test_forward_ad', device_type="cpu"), + )), + # This variant tests batch_norm with cuDNN disabled only on CUDA devices + OpInfo('nn.functional.batch_norm', + variant_test_name='without_cudnn', + aten_name='batch_norm', + dtypes=empty_types(), + dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + allow_cow_input_materialize_forward=[1, 2], + 
allow_cow_input_materialize_backward=[1, 2], + decorators=[onlyCUDA, disablecuDNN], + skips=( + DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-03, rtol=1e-04)}), + 'TestJit', 'test_variant_consistency_jit'), + ), + sample_inputs_func=sample_inputs_batch_norm), + OpInfo( + "nn.functional.binary_cross_entropy", + aten_backward_name='binary_cross_entropy_backward', + sample_inputs_func=sample_inputs_binary_cross_entropy, + dtypes=floating_types_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + gradcheck_fast_mode=False, + supports_autograd=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=( + # RuntimeError: expected int at position 0, but got: Tensor + DecorateInfo( + unittest.skip("Skipped!"), + "TestCudaFuserOpInfo", + ), + # RuntimeError: expected int at position 0, but got: Tensor + DecorateInfo( + unittest.skip("Skipped!"), + "TestNNCOpInfo", + "test_nnc_correctness", + ), + # Fails for unknown reason: https://github.com/pytorch/pytorch/issues/120783 + DecorateInfo( + unittest.skip("Skipped!"), + "TestCompositeCompliance", + "test_cow_input", + device_type='cuda', + ), + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1e-3, rtol=1e-3)}), + "TestJit", + "test_variant_consistency_jit", + ), + # RuntimeError: output with shape [] doesn't match the broadcast shape [5, 5] + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_meta_outplace'), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_symbolic_meta_outplace'), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_symbolic_meta_outplace_all_strides'), + ), + skips=( + # RuntimeError: expected int at position 0, but got: Tensor + DecorateInfo( + unittest.expectedFailure, + "TestJit", + "test_variant_consistency_jit", + ), + ), + ), + # We have to add 2 OpInfo entry for `igamma` and `igammac`.First is the + # standard entry, second is to run 
gradcheck tests on the second argument. + BinaryUfuncInfo('igamma', + dtypes=floating_types_and(torch.bfloat16, torch.float16), + aliases=('torch.special.gammainc',), + dtypesIfCUDA=floating_types(), + # TODO: FIXME + supports_rhs_python_scalar=False, + supports_autograd=False, + skips=( + # FIXME: incorrectly tries to pass a rhs scalar + DecorateInfo(unittest.expectedFailure, 'TestJit', + 'test_jit_alias_remapping'), + )), + # TODO: FIXME, ideally by implemented grad for both inputs + # BinaryUfuncInfo('igamma', + # variant_test_name='grad_other', + # # Since autograd formula is implemented only for other and + # # gradcheck test verifies the formula for input in SampleInput, + # # we permute the arguments. + # op=lambda self, other, **kwargs: torch.igamma(other, self, **kwargs), + # inplace_variant=None, + # method_variant=None, + # supports_rhs_python_scalar=False, + # rhs_make_tensor_kwargs=dict(requires_grad=False), + # dtypes=floating_types_and(torch.bfloat16, torch.float16), + # backward_dtypesIfCPU=floating_types_and(torch.bfloat16), + # dtypesIfCUDA=floating_types(), + # backward_dtypesIfCUDA=floating_types(), + # supports_inplace_autograd=False, + # skips=( + # # Derivative wrt first tensor not implemented + # DecorateInfo(unittest.expectedFailure, "TestCommon", + # "test_floating_inputs_are_differentiable"),"), + # # test does not work with passing lambda for op + # # AssertionError: False is not true : Tensors failed to compare as equal! + # DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # # test fails are we permute the arguments function variant + # # but not for inplace or method. 
+ # DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'), + # # TypeError: igamma(): argument 'input' (position 1) must be Tensor, not float + # DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs'), + # )), + BinaryUfuncInfo('igammac', + dtypes=floating_types_and(torch.bfloat16, torch.float16), + aliases=('torch.special.gammaincc',), + dtypesIfCUDA=floating_types(), + supports_autograd=False, + supports_rhs_python_scalar=False, + skips=( + # FIXME: incorrectly tries to pass a rhs scalar + DecorateInfo(unittest.expectedFailure, 'TestJit', + 'test_jit_alias_remapping'), + )), + # TODO: FIXME, ideally by implementing grad for both inputs + # BinaryUfuncInfo('igammac', + # variant_test_name='grad_other', + # # Since autograd formula is implemented only for other and + # # gradcheck test verifies the formula for input in SampleInput, + # # we permute the arguments + # op=lambda self, other, **kwargs: torch.igammac(other, self, **kwargs), + # inplace_variant=None, + # method_variant=None, + # supports_rhs_python_scalar=False, + # rhs_make_tensor_kwargs=dict(requires_grad=False), + # dtypes=floating_types_and(torch.bfloat16, torch.float16), + # backward_dtypesIfCPU=floating_types_and(torch.bfloat16), + # dtypesIfCUDA=floating_types(), + # backward_dtypesIfCUDA=floating_types(), + # supports_inplace_autograd=False, + # decorators=[ + # # Derivative wrt first tensor not implemented + # DecorateInfo(unittest.expectedFailure, "TestCommon", + # "test_floating_inputs_are_differentiable"), + # ], + # skips=( + # # test does not work with passing lambda for op + # # AssertionError: False is not true : Tensors failed to compare as equal! + # DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # # test fails are we permute the arguments function variant + # # but not for inplace or method. 
+ # DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'), + # # TypeError: igammac(): argument 'input' (position 1) must be Tensor, not float + # DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs'), + # )), + UnaryUfuncInfo('nn.functional.softshrink', + aten_name="softshrink", + aten_backward_name='softshrink_backward', + dtypes=floating_types_and(torch.bfloat16, torch.float16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=False, + sample_inputs_func=sample_inputs_softshrink, + error_inputs_func=error_inputs_softshrink), + UnaryUfuncInfo('nn.functional.hardshrink', + aten_name="hardshrink", + aten_backward_name='hardshrink_backward', + dtypes=floating_types_and(torch.bfloat16, torch.float16), + assert_autodiffed=True, + sample_inputs_func=sample_inputs_hardshrink, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + autodiff_nonfusible_nodes=["aten::hardshrink"]), + UnaryUfuncInfo('nn.functional.hardtanh', + aten_name="hardtanh", + aten_backward_name='hardtanh_backward', + dtypes=floating_types_and(torch.int8, torch.int16, torch.int32, torch.int64, torch.half, torch.bfloat16), + backward_dtypes=all_types_and(torch.half, torch.bfloat16), + backward_dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + assert_autodiffed=True, + sample_inputs_func=sample_inputs_hardtanh, + error_inputs_func=error_inputs_hardtanh, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + autodiff_nonfusible_nodes=["aten::hardtanh"]), + OpInfo('nn.functional.gelu', + aten_name="gelu", + aten_backward_name='gelu_backward', + ref=reference_gelu if TEST_SCIPY else None, + error_inputs_func=error_inputs_gelu, + supports_autograd=True, + assert_autodiffed=True, + sample_inputs_func=sample_inputs_gelu, + dtypes=floating_types_and(torch.bfloat16, torch.half), + supports_gradgrad=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + 
autodiff_nonfusible_nodes=["aten::gelu"], + skips=( + # AssertionError: Tensor-likes are not close! + # May not replicate in CI + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out'), + DecorateInfo(unittest.skip("Unsupported on MPS for now"), 'TestCommon', 'test_numpy_ref_mps'), + )), + UnaryUfuncInfo('nn.functional.relu6', + aten_name="relu6", + dtypes=all_types_and(torch.half, torch.bfloat16), + backward_dtypes=floating_types_and(torch.half, torch.bfloat16), + assert_autodiffed=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + autodiff_nonfusible_nodes=["aten::relu6"]), + OpInfo('mm', + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_mm, + skips=( + # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 + DecorateInfo( + unittest.skip("Skipped!"), + 'TestSchemaCheckModeOpInfo', + 'test_schema_correctness', + dtypes=(torch.complex64, torch.complex128)), + )), + OpInfo('mode', + op=torch.mode, + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # Resized a non-empty tensor but did not warn about it + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + # FIXME: + # Expected 2114 but got 1123. 
+ # Absolute difference: 991 (up to 0.001 allowed) + # Relative difference: 0.46877956480605487 (up to 0.001 allowed) + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_compare_cpu", + dtypes=(torch.float32,), + device_type="cuda", + ), + ), + sample_inputs_func=sample_inputs_mode,), + make_mvlgamma_opinfo(variant_test_name='mvlgamma_p_1', + domain=(1, None), + skips=skips_mvlgamma(), + sample_kwargs=lambda device, dtype, input: ({'p': 1}, {'d': 1})), + make_mvlgamma_opinfo(variant_test_name='mvlgamma_p_3', + domain=(2, None), + skips=skips_mvlgamma(), + sample_kwargs=lambda device, dtype, input: ({'p': 3}, {'d': 3})), + make_mvlgamma_opinfo(variant_test_name='mvlgamma_p_5', + domain=(3, None), + skips=skips_mvlgamma(), + sample_kwargs=lambda device, dtype, input: ({'p': 5}, {'d': 5})), + BinaryUfuncInfo('ne', + ref=np.not_equal, + aliases=('not_equal',), + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), + always_returns_bool=True, + supports_autograd=False, + skips=( + )), + OpInfo('narrow', + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=partial(sample_inputs_narrow_narrow_copy, is_narrow=True), + reference_inputs_func=partial(reference_inputs_narrow_narrow_copy, is_narrow=True), + error_inputs_func=partial(error_inputs_narrow_narrow_copy, is_narrow=True, is_ref=False), + skips=( + # Use of .item() + DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_operator'), + DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward'), + DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + )), + OpInfo('narrow_copy', + 
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), + supports_out=True, + supports_forward_ad=False, + supports_fwgrad_bwgrad=False, + supports_autograd=False, + # https://github.com/pytorch/pytorch/issues/86931 + sample_inputs_func=partial(sample_inputs_narrow_narrow_copy, is_narrow=False), + reference_inputs_func=partial(reference_inputs_narrow_narrow_copy, is_narrow=False), + error_inputs_func=partial(error_inputs_narrow_narrow_copy, is_narrow=False, is_ref=False), + skips=( + # https://github.com/pytorch/pytorch/issues/84577 + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + # Could not run 'aten::narrow_copy.out' with arguments from the 'CUDA' backend + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_meta_outplace', + device_type='cuda'), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_meta_outplace', + device_type='cuda'), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_symbolic_meta_outplace', + device_type='cuda'), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_symbolic_meta_outplace_all_strides'), + )), + OpInfo('view_copy', + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), + ref=lambda x, newshape: np.reshape(x, newshape).copy(), + supports_out=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_autograd=True, + sample_inputs_func=sample_inputs_view_reshape, + error_inputs_func=error_inputs_view_reshape, + skips=( + # RuntimeError: view size is not compatible with input tensor's size and stride + # (at least one dimension spans across two contiguous subspaces). Use .reshape(...) instead. 
+ DecorateInfo( + unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides" + ), + )), + UnaryUfuncInfo('neg', + aliases=('negative', ), + ref=np.negative, + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16, torch.chalf), + error_inputs_func=error_inputs_neg, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + assert_autodiffed=True), + OpInfo('dist', + op=torch.dist, + dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + # torch.autograd.gradcheck.GradcheckError: While computing batched gradients, got: + # Could not allocate memory to change Tensor SizesAndStrides! + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_dist), + OpInfo('outer', + op=torch.outer, + aliases=('ger', ), + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_outer,), + OpInfo('ormqr', + op=torch.ormqr, + dtypes=floating_and_complex_types(), + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=False, + supports_fwgrad_bwgrad=False, + sample_inputs_func=sample_inputs_ormqr, + error_inputs_func=error_inputs_ormqr, + decorators=[skipCUDAIfNoCusolver, skipCPUIfNoLapack], + skips=( + # Strides are not the same! 
+ DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + )), + OpInfo('permute', + ref=np.transpose, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + supports_out=False, + assert_autodiffed=True, + autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused + autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused + assert_jit_shape_analysis=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_varargs=True, + sample_inputs_func=sample_inputs_permute, + reference_inputs_func=reference_inputs_permute), + BinaryUfuncInfo('pow', + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.half, torch.bfloat16, torch.chalf), + ref=np.power, + # Due to AVX2 currently not being fully supported for Float16, log_vml_cpu can't be enabled + # for Float16, causing this test to fail. pow's autograd for Float16 is thus currently + # unsupported on CPU. + backward_dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), + backward_dtypesIfCUDA=floating_and_complex_types_and(torch.bfloat16, torch.half, torch.chalf), + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_inplace_autograd=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=True, + supports_one_python_scalar=True, + # Integer types do not support negative exponentes + rhs_make_tensor_kwargs=dict(low=0), + # Raising negative real numbers to fractional powers is not supported + lhs_make_tensor_kwargs=dict(low=0), + decorators=( + DecorateInfo(toleranceOverride({torch.complex64: tol(atol=1e-4, rtol=1.3e-05)}), + 'TestBinaryUfuncs', 'test_reference_numerics'), + DecorateInfo(toleranceOverride({torch.complex64: tol(atol=1e-4, rtol=1.3e-05), + torch.complex128: tol(atol=1e-4, rtol=1.3e-05)}), + 'TestBinaryUfuncs', 'test_scalar_support'), + ), + skips=( + # Skipping integers because they are being 
raised to negative powers causing an error + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_reference_numerics_small_values', + dtypes=[torch.int8, torch.int16, torch.int32, torch.int64]), + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_reference_numerics_large_values', + dtypes=[torch.int16, torch.int32, torch.int64]), + # FIXME Complex values error with: Greatest absolute difference: nan at index + # Ref: https://github.com/pytorch/pytorch/issues/76853 + # For `chalf`, reference computation in `numpy` is computed in `cfloat`. + # Output of `chalf` saturates to `inf` quicker than reference due to its small range + # which leads to failure of this test. + DecorateInfo(unittest.skip("Skipped!"), 'TestDecomp', 'test_quick', + dtypes=(torch.complex32,), active_if=TEST_WITH_ROCM), + # FIXME: + # Mismatched elements: 1 / 500 (0.2%) + # Greatest absolute difference: nan at index (7, 9, 0) (up to 1e-05 allowed) + # Greatest relative difference: nan at index (7, 9, 0) (up to 0.001 allowed) + DecorateInfo(unittest.skip("Skipped!"), 'TestDecomp', 'test_comprehensive', + dtypes=(torch.complex32,)), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_complex_half_reference_testing', + dtypes=(torch.complex32,), active_if=TEST_WITH_ROCM), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_batch_vs_slicing', + dtypes=(torch.complex32,)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_non_contig', + dtypes=(torch.complex32,)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics', + dtypes=(torch.complex32,)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_small_values', + dtypes=(torch.complex32, torch.complex64, torch.complex128)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_large_values', + dtypes=(torch.complex32, torch.complex64, torch.complex128)), + 
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_extremal_values', + dtypes=(torch.complex32, torch.complex64, torch.complex128)), + )), + BinaryUfuncInfo('float_power', + ref=np.float_power, + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool), + promotes_int_to_float=True, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_one_python_scalar=True, + # Integer types do not support negative exponentes + rhs_make_tensor_kwargs=dict(low=0), + # Raising negative real numbers to fractional powers is not supported + lhs_make_tensor_kwargs=dict(low=0), + decorators=( + DecorateInfo(toleranceOverride({torch.complex64: tol(atol=1e-4, rtol=1.3e-05), + torch.complex128: tol(atol=1e-4, rtol=1.3e-05)}), + 'TestBinaryUfuncs', 'test_scalar_support'), + ), + skips=( + # FIXME + # AssertionError: Object comparison failed: torch.float64 != torch.float32 + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_type_promotion'), + # -3.43399e+38 is outside the range of representable values of type 'float' + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # Complex values error with: Greatest absolute difference: nan at index + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_small_values', + dtypes=[torch.complex64, torch.complex128]), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_large_values', + dtypes=[torch.complex64, torch.complex128]), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_extremal_values', + dtypes=[torch.complex64, torch.complex128]), + # Inplace always promotes to double and thus other floating dtypes are not supported + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_meta_inplace', + dtypes=[torch.bfloat16, torch.float16, torch.float32]), + 
)), + OpInfo('qr', + op=torch.qr, + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_linalg_qr_geqrf, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # In-place ops + check_batched_gradgrad=False, + decorators=[skipCUDAIfNoCusolver, skipCPUIfNoLapack]), + UnaryUfuncInfo('rad2deg', + ref=np.degrees, + decorators=(precisionOverride({torch.bfloat16: 7e-1, + torch.float16: 7e-1}),), + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + promotes_int_to_float=True), + UnaryUfuncInfo('real', + ref=np.real, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + skips=( + # Skip since real and imag don't have out variants. 
+ DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', 'test_out_arg_all_dtypes'), + )), + OpInfo( + "roll", + ref=np.roll, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf), + error_inputs_func=error_inputs_roll, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_roll, + decorators=(onlyNativeDeviceTypes,), + ), + OpInfo( + "rot90", + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half), + error_inputs_func=error_inputs_rot90, + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_rot90, + ), + # To test reference numerics against multiple values of argument `decimals`, + # we make multiple OpInfo entries with each entry corresponding to different value of decimals. + UnaryUfuncInfo('round', + ref=np.round, + aliases=('special.round',), + dtypes=all_types_and(torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + DecorateInfo(unittest.expectedFailure, + 'TestNNCOpInfo', + 'test_nnc_correctness', + dtypes=tuple(t for t in integral_types() if t != torch.uint8)), + DecorateInfo(unittest.skip("Skipped!"), + 'TestNNCOpInfo', + 'test_nnc_correctness', + dtypes=(torch.bfloat16,)), + ), + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + assert_autodiffed=True, + ), + UnaryUfuncInfo('round', + ref=np.round, + variant_test_name='decimals_0', + aliases=('special.round',), + dtypes=floating_types_and(torch.half, torch.bfloat16), + sample_kwargs=lambda device, dtype, input: ({'decimals': 0}, {'decimals': 0}), + sample_inputs_func=partial(sample_inputs_elementwise_unary, op_kwargs={'decimals': 0}), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + 
assert_autodiffed=False, + supports_sparse_csr=False), + UnaryUfuncInfo('round', + ref=np.round, + variant_test_name='decimals_3', + aliases=('special.round',), + dtypes=floating_types_and(torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), + sample_kwargs=lambda device, dtype, input: ({'decimals': 3}, {'decimals': 3}), + sample_inputs_func=partial(sample_inputs_elementwise_unary, op_kwargs={'decimals': 3}), + skips=( + # test_ops already tested for this overload with `decimals_0` opinfo entry + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon'), + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients'), + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients'), + DecorateInfo(unittest.skip("Skipped!"), 'TestJit'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits'), + DecorateInfo(toleranceOverride({torch.bfloat16: tol(atol=1e-3, rtol=0.016)}), + "TestUnaryUfuncs", "test_reference_numerics_extremal", + device_type="cuda"), + DecorateInfo(toleranceOverride({torch.bfloat16: tol(atol=1e-3, rtol=0.016)}), + "TestUnaryUfuncs", "test_reference_numerics_normal", + device_type="cuda"), + ), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=False, + supports_sparse_csr=False), + UnaryUfuncInfo('round', + ref=np.round, + variant_test_name='decimals_neg_3', + aliases=('special.round',), + dtypes=floating_types_and(torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), + sample_kwargs=lambda device, dtype, input: ({'decimals': -3}, {'decimals': -3}), + sample_inputs_func=partial(sample_inputs_elementwise_unary, op_kwargs={'decimals': -3}), + skips=( + # test_ops already tested for this overload with `decimals_0` opinfo entry + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon'), + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients'), + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients'), + DecorateInfo(unittest.skip("Skipped!"), 'TestJit'), + 
DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits'), + ), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=False, + supports_sparse_csr=False), + UnaryUfuncInfo('sin', + ref=np.sin, + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), + assert_autodiffed=True, + handles_large_floats=False, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + skips=( + # Fails on CUDA but passes on ROCm + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + dtypes=(torch.cdouble,), device_type='cuda'), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + dtypes=(torch.cfloat, torch.cdouble,), device_type='cpu', active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + dtypes=(torch.cfloat, torch.cdouble,), device_type='cpu', active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped! 
sparse backward not supported"), + 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), + ), + decorators=(precisionOverride({torch.bfloat16: 1e-2}),)), + UnaryUfuncInfo('sinc', + ref=np_sinc_with_fp16_as_fp32, + aliases=('special.sinc',), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + handles_large_floats=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True), + UnaryUfuncInfo('sinh', + ref=np_unary_ufunc_integer_promotion_wrapper(np.sinh), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + promotes_int_to_float=True, + decorators=(precisionOverride({torch.float16: 1e-2}),), + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], + active_if=(IS_MACOS or IS_WINDOWS)), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], + active_if=(IS_MACOS or IS_WINDOWS)), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + dtypes=(torch.cdouble,)), + # Reference: https://github.com/pytorch/pytorch/issues/48641 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.int8]), + DecorateInfo(unittest.skip("Skipped! 
sparse backward not supported"), + 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), + )), + UnaryUfuncInfo('sign', + ref=reference_sign, + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.half), + dtypesIfCUDA=all_types_and(torch.bool, torch.bfloat16, torch.half), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/41245 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + dtypes=[torch.bfloat16, torch.float16, torch.float32, torch.float64]), + )), + UnaryUfuncInfo('sgn', + ref=reference_sgn, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf), + backward_dtypes=floating_and_complex_types_and(torch.bfloat16, torch.half), + backward_dtypesIfCUDA=floating_and_complex_types_and(torch.bfloat16, torch.half, torch.chalf), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/41245 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + dtypes=[torch.bfloat16, torch.float16, torch.float32, torch.float64]), + DecorateInfo(unittest.skip("Skipped! 
sparse backward not supported"), + 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), + )), + OpInfo('split', + dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool, torch.chalf), + sample_inputs_func=partial(sample_inputs_split, list_args=False), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused + autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused + assert_autodiffed=True), + OpInfo('split', + # Cannot declare this aten_name because of + # test_variant_consistency_jit_split_list_args_cpu_float32 + decomp_aten_name='split_with_sizes', + variant_test_name='list_args', + dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool), + sample_inputs_func=partial(sample_inputs_split, list_args=True), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False), + # `unsafe_split` supports only `int` for split_size argument + OpInfo('unsafe_split', + dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool, torch.chalf), + sample_inputs_func=partial(sample_inputs_split, list_args=False), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused + autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused + assert_autodiffed=True, + check_batched_forward_grad=False), + OpInfo('split_with_sizes', + dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool, torch.chalf), + sample_inputs_func=sample_inputs_split_with_sizes, + autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused + autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=True), + OpInfo('split_with_sizes_copy', + dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool, torch.chalf), + 
sample_inputs_func=sample_inputs_split_with_sizes, + supports_out=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # No error raised + DecorateInfo(unittest.expectedFailure, "TestCommon", "test_out_requires_grad_error"), + )), + BinaryUfuncInfo('__radd__', + op=torch.Tensor.__radd__, + dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool), + supports_out=False, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit',), + + ), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + autodiff_nonfusible_nodes=['aten::add'],), + BinaryUfuncInfo('__rdiv__', + op=torch.Tensor.__rdiv__, + dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool), + promotes_int_to_float=True, + lhs_make_tensor_kwargs={'exclude_zero': True}, + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + skips=( + # https://github.com/pytorch/pytorch/issues/76806 + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'), + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit',), + ), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=True, + autodiff_nonfusible_nodes=['aten::mul', 'aten::reciprocal'],), + BinaryUfuncInfo('__rmul__', + op=torch.Tensor.__rmul__, + dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool), + supports_out=False, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit',), + ), + assert_autodiffed=True, + supports_forward_ad=True, + 
supports_fwgrad_bwgrad=True, + autodiff_nonfusible_nodes=['aten::mul'],), + BinaryUfuncInfo('__rand__', + op=torch.Tensor.__rand__, + dtypes=integral_types_and(torch.bool), + supports_out=False, + supports_autograd=False, + supports_forward_ad=True, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + )), + BinaryUfuncInfo('__ror__', + op=torch.Tensor.__ror__, + dtypes=integral_types_and(torch.bool), + supports_out=False, + supports_autograd=False, + supports_forward_ad=True, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + )), + BinaryUfuncInfo('__rxor__', + op=torch.Tensor.__rxor__, + dtypes=integral_types_and(torch.bool), + supports_out=False, + supports_autograd=False, + supports_forward_ad=True, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + )), + OpInfo('__rmatmul__', + op=torch.Tensor.__rmatmul__, + dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, + *[torch.bfloat16] + if SM53OrLater or TEST_WITH_ROCM else []), + assert_autodiffed=True, + sample_inputs_func=partial(sample_inputs_matmul, is_rmatmul=True), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_forward_grad=False, + decorators=( + # NVIDIA only assures that bfloat16 is supported by bmm if SM >= 5.3 + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes', device_type='cuda', active_if=not SM53OrLater), + DecorateInfo(toleranceOverride({torch.complex64: tol(atol=1e-05, rtol=1.2e-03)}), + 'TestMathBits', 'test_conj_view'), + DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-05, rtol=1.2e-03)}), + 'TestCommon', 'test_noncontiguous_samples'), + 
DecorateInfo(toleranceOverride({torch.complex64: tol(atol=1e-05, rtol=1e-05)}), + "TestDecomp", "test_comprehensive", device_type="cuda", + active_if=TEST_WITH_ROCM), + ), + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit',), + # https://github.com/pytorch/pytorch/issues/67470 + DecorateInfo(unittest.skip("67470!"), + 'TestCommon', 'test_noncontiguous_samples', + device_type='cpu', dtypes=(torch.long,)), + # Fails on XLA. + # AssertionError: False is not true : Tensors failed to compare as equal + DecorateInfo(unittest.skip("Skipped!"), 'TestOpInfo', device_type='xla', dtypes=(torch.long,)), + # https://github.com/pytorch/pytorch/issues/71774 + DecorateInfo(unittest.skip('Skipped!'), 'TestNNCOpInfo', 'test_nnc_correctness', + device_type='cpu', dtypes=(torch.long,)), + )), + BinaryUfuncInfo('__rmod__', + op=torch.Tensor.__rmod__, + dtypes=floating_types_and(torch.bfloat16, torch.half,), + dtypesIfCUDA=all_types_and(torch.bfloat16, torch.half), + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_one_python_scalar=True, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit',), + ), + # Support autograd after torch.remainder(Tensor, Tensor) supports + # autograd of the second argument. 
+ # https://github.com/pytorch/pytorch/pull/58476/files#r637167630 + # supports_autograd=False, + assert_autodiffed=True, + autodiff_nonfusible_nodes=['aten::remainder'],), + BinaryUfuncInfo('__rpow__', + op=torch.Tensor.__rpow__, + dtypes=all_types_and_complex_and(torch.bfloat16, torch.half), + # Reference: https://github.com/pytorch/pytorch/issues/54774 + # "log2" "_vml_cpu" not implemented for Half + backward_dtypes=all_types_and_complex_and(torch.bfloat16, torch.half), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_one_python_scalar=True, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit',), + # TODO: FIXME tolerance is too high + DecorateInfo(unittest.skip('Skipped!'), 'TestFwdGradients'), + DecorateInfo(unittest.skip('Skipped!'), 'TestBwdGradients'), + ), + assert_autodiffed=True, + autodiff_nonfusible_nodes=['aten::pow'],), + BinaryUfuncInfo('__rsub__', + op=torch.Tensor.__rsub__, + dtypes=all_types_and_complex_and(torch.bfloat16, torch.half), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + supports_one_python_scalar=True, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit',), + ), + assert_autodiffed=True, + autodiff_nonfusible_nodes=['aten::rsub'],), + BinaryUfuncInfo('rsub', + dtypes=all_types_and_complex_and(torch.bfloat16, torch.half), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + supports_inplace_autograd=False, + assert_autodiffed=None, + sample_inputs_func=sample_inputs_add_sub), + OpInfo('select', + aten_backward_name='select_backward', + dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool, torch.chalf), + 
sample_inputs_func=sample_inputs_select, + assert_jit_shape_analysis=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False), + OpInfo('select_scatter', + dtypes=all_types_and(torch.bfloat16, torch.half, torch.bool), + sample_inputs_func=sample_inputs_select_scatter, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False), + OpInfo('slice', + op=torch.ops.aten.slice.Tensor, + dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool, torch.chalf), + sample_inputs_func=sample_inputs_slice, + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_scripting=False, + supports_inplace_autograd=False, + supports_out=False), + OpInfo('slice_scatter', + dtypes=all_types_and(torch.bfloat16, torch.half, torch.bool), + sample_inputs_func=sample_inputs_slice_scatter, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=True), + UnaryUfuncInfo('signbit', + ref=np.signbit, + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.half), + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + supports_autograd=False,), + UnaryUfuncInfo('tan', + ref=np.tan, + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), + decorators=(DecorateInfo( + toleranceOverride({torch.complex64: tol(atol=1e-04, rtol=1e-05)}), + 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cuda'),), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + promotes_int_to_float=True, + skips=( + 
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], + active_if=(IS_MACOS or IS_WINDOWS)), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], + active_if=(IS_MACOS or IS_WINDOWS)), + DecorateInfo(unittest.skip("Skipped! sparse backward not supported"), + 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), + # FIXME: + # Mismatched elements: 2 / 400 (0.5%) + # Greatest absolute difference: inf at index (7, 16) (up to 1e-05 allowed) + # Greatest relative difference: nan at index (7, 16) (up to 0.001 allowed) + DecorateInfo( + unittest.skip("Skipped!"), + "TestInductorOpInfo", + "test_comprehensive", + dtypes=(torch.float16,), + device_type="cuda", + ), + ), + # tan(pi/2 * odd_number) is nan + reference_numerics_filter=NumericsFilter( + condition=lambda x: close_to_int(x / (math.pi * 0.5)), safe_val=math.pi)), + UnaryUfuncInfo('tanh', + ref=np.tanh, + aten_backward_name='tanh_backward', + aliases=('nn.functional.tanh',), + decorators=(precisionOverride({torch.bfloat16: 1e-2}), + DecorateInfo( + toleranceOverride({torch.complex64: tol(atol=1e-04, rtol=2e-05)}), + 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cuda'),), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), + assert_autodiffed=True, + assert_jit_shape_analysis=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + promotes_int_to_float=True, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], + 
active_if=(IS_MACOS or IS_WINDOWS)), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], + active_if=(IS_MACOS or IS_WINDOWS)), + DecorateInfo(unittest.skip("Skipped! sparse backward not supported"), + 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), + ), + # tan(j * pi/2 * odd_number) is nan + reference_numerics_filter=NumericsFilter( + condition=lambda x: (close_to_int(x / (math.pi * 0.5j)) + if x.is_complex() else x.new_tensor(False, dtype=torch.bool)), + safe_val=0)), + OpInfo('tensor_split', + ref=np.array_split, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), + dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # Pre-existing condition; Needs to be fixed + DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_operator'), + DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward'), + DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad'), + ), + sample_inputs_func=sample_inputs_tensor_split,), + OpInfo('hsplit', + dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.bfloat16, torch.float16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_hsplit, + error_inputs_func=error_inputs_hsplit,), + OpInfo('vsplit', + dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.bfloat16, torch.float16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_vsplit, + error_inputs_func=error_inputs_vsplit,), + 
OpInfo('dsplit', + dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.bfloat16, torch.float16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_dsplit, + error_inputs_func=error_inputs_dsplit,), + OpInfo('triangular_solve', + op=torch.triangular_solve, + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_legacy_solve, + check_batched_gradgrad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + gradcheck_wrapper=lambda *args, **kwargs: gradcheck_wrapper_triangular_input(*args, idx=1, **kwargs), + decorators=[ + skipCUDAIfNoMagma, + skipCPUIfNoLapack, + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=3e-5, rtol=3e-6)}), + 'TestConsistency', 'test_output_match', device_type='cpu', + ), + ], + skips=( + # AssertionError: Scalars are not equal! + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + # Gradcheck fails + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_fn_fwgrad_bwgrad', + dtypes=floating_and_complex_types()), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out', + device_type='mps', dtypes=[torch.float32]), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager', + device_type='mps', dtypes=[torch.float32]), + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', + device_type='mps', dtypes=[torch.float32]), + )), + UnaryUfuncInfo('trunc', + aliases=('fix', ), + ref=np.trunc, + dtypes=all_types_and(torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + skips=( + DecorateInfo(unittest.expectedFailure, + 'TestNNCOpInfo', + 'test_nnc_correctness', + dtypes=tuple(t for t in integral_types() if t != torch.uint8)), + ), + supports_sparse_csr=True, + supports_sparse_csc=True, + 
supports_sparse_bsr=True, + supports_sparse_bsc=True, + assert_autodiffed=True), + UnaryUfuncInfo('exp2', + aliases=('special.exp2', ), + ref=np_unary_ufunc_integer_promotion_wrapper(np.exp2), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + dtypes=[torch.cdouble]), + # Reference: https://github.com/pytorch/pytorch/issues/48010 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + )), + UnaryUfuncInfo('expm1', + aliases=('special.expm1', ), + ref=np_unary_ufunc_integer_promotion_wrapper(np.expm1), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + promotes_int_to_float=True, + assert_autodiffed=True, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cuda', dtypes=[torch.complex128]), + DecorateInfo(unittest.skip("Skipped! sparse backward not supported"), + 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), + )), + UnaryUfuncInfo('nan_to_num', + ref=np.nan_to_num, + dtypes=all_types_and(torch.half, torch.bool, torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.half, torch.bool, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + skips=( + DecorateInfo(unittest.skip("Skipped! 
sparse backward not supported"), + 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), + ), + # Passing numpy_kwargs via sample_kwargs, as numpy does comparison + # with BFloat16 in float, since it currently doesn't support BFloat16. + # Ref: https://github.com/pytorch/pytorch/issues/57982#issuecomment-839150556 + sample_kwargs=lambda device, dtype, input: ({}, + {'posinf': torch.finfo(torch.bfloat16).max, + 'neginf': torch.finfo(torch.bfloat16).min}) + if dtype is torch.bfloat16 else ({}, {})), + UnaryUfuncInfo('reciprocal', + ref=np_unary_ufunc_integer_promotion_wrapper(np.reciprocal), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/45690 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + dtypes=[torch.cfloat, torch.cdouble]), + )), + UnaryUfuncInfo('rsqrt', + ref=lambda x: np.reciprocal(np.sqrt(x)), + domain=(0, None), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), + decorators=(precisionOverride({torch.half: 5e-2}),), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + dtypes=(torch.cfloat, torch.cdouble)), + # AssertionError: Tensor-likes are not close! 
+ # Greatest absolute difference: nan at index (700,) (up to 0.01 allowed) + # Greatest relative difference: nan at index (700,) (up to 0.001 allowed) + DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', 'test_reference_numerics_large', + dtypes=(torch.chalf,)), + )), + UnaryUfuncInfo('sqrt', + ref=np.sqrt, + supports_sparse=True, + domain=(0, None), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), + assert_autodiffed=True, + supports_forward_ad=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + decorators=( + precisionOverride({torch.bfloat16: 7e-2}), + DecorateInfo( + toleranceOverride({torch.chalf: tol(atol=1e-2, rtol=0)}), + 'TestUnaryUfuncs', 'test_reference_numerics_large'), + ), + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/47358 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cpu', dtypes=(torch.cfloat, torch.cdouble), + active_if=IS_MACOS), + DecorateInfo(unittest.skip("Skipped! 
sparse backward not supported"), + 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), + )), + UnaryUfuncInfo('square', + ref=np.square, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + decorators=(precisionOverride({torch.complex64: 3e-4, torch.bfloat16: 3e-1}),), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/52549 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + dtypes=[torch.cfloat, torch.cdouble]), + # >>> t = torch.tensor(complex(-0.01, float("inf"))) + # >>> np.square(t.numpy()) + # (-inf-infj) + # >>> t.square() + # tensor(-inf-infj) + # >>> t.cuda().square() + # tensor(inf+nanj, device='cuda:0') + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cuda', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_meta_inplace', + dtypes=[torch.bool]), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_meta_inplace', + dtypes=[torch.bool]), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_symbolic_meta_inplace', + dtypes=[torch.bool]), + ),), + OpInfo('lerp', + dtypes=floating_and_complex_types_and(torch.bfloat16, torch.half), + dtypesIfCUDA=floating_and_complex_types_and(torch.chalf, torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_lerp, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=True), + UnaryUfuncInfo('angle', + ref=np.angle, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), + dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool), + decorators=(precisionOverride({torch.float16: 1e-2, + torch.bfloat16: 1e-2}),), + backward_dtypes=floating_and_complex_types_and(torch.bfloat16, torch.float16), + backward_dtypesIfCUDA=floating_and_complex_types_and(torch.chalf), + 
supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + supports_complex_to_float=True, + skips=( + # Ref: https://github.com/pytorch/pytorch/issues/78413 + DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', 'test_reference_numerics_small', + dtypes=(torch.bfloat16, torch.float16, torch.float32, torch.float64),), + )), + UnaryUfuncInfo('isfinite', + ref=np.isfinite, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), + supports_out=False, + supports_autograd=False), + UnaryUfuncInfo('isinf', + ref=np.isinf, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), + supports_out=False, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + supports_autograd=False), + UnaryUfuncInfo('isposinf', + ref=np.isposinf, + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16), + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + supports_autograd=False), + UnaryUfuncInfo('isneginf', + ref=np.isneginf, + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16), + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + supports_autograd=False), + UnaryUfuncInfo('isreal', + ref=np.isreal, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), + supports_out=False, + supports_autograd=False), + UnaryUfuncInfo('isnan', + ref=np.isnan, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), + supports_out=False, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + 
supports_autograd=False), + OpInfo('einsum', + # we need this lambda because SampleInput expects tensor input as the first argument + # TODO(@heitorschueroff) update SampleInput to handle such cases + op=lambda tensors, equation: torch.einsum(equation, tensors), + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16), + backward_dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_forward_grad=False, + # See https://github.com/pytorch/pytorch/issues/66357 + sample_inputs_func=sample_inputs_einsum, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # test does not work with passing lambda for op + # there's a test `test_einsum` in `test_jit.py` to handle this case + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + )), + OpInfo('svd', + op=torch.svd, + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_svd, + # Runs very slowly on slow-gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_forward_grad=False, + # We're using at::allclose, which does not have a batching rule + check_batched_grad=False, + check_batched_gradgrad=False, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, with_tf32_off], + skips=( + # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 + DecorateInfo( + unittest.skip("Skipped!"), + 'TestSchemaCheckModeOpInfo', + 'test_schema_correctness', + dtypes=(torch.complex64, torch.complex128)), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out', + device_type='mps', dtypes=[torch.float32]), + 
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager', + device_type='mps', dtypes=[torch.float32]), + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', + device_type='mps', dtypes=[torch.float32]), + )), + OpInfo('svd_lowrank', + op=lambda *args, **kwargs: wrapper_set_seed( + lambda a, b, **kwargs: torch.svd_lowrank(a @ b.mT, **kwargs), + *args, **kwargs + ), + dtypes=floating_and_complex_types(), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + # Due to the use of randomness + check_batched_grad=False, + check_batched_gradgrad=False, + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + supports_forward_ad=True, + sample_inputs_func=sample_inputs_svd_lowrank, + decorators=[skipCUDAIfNoCusolver, skipCPUIfNoLapack, with_tf32_off, + DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-03, rtol=1e-03), + torch.complex64: tol(atol=1e-02, rtol=1e-02)}), + 'TestCommon', 'test_noncontiguous_samples'), + # FIXME This should be the following, but the toleranceOverride does not seem to do anything! 
+ # DecorateInfo(toleranceOverride({torch.complex128: tol(atol=1e-04, rtol=1e-04)}), + # 'TestFwdGradients', 'test_fn_fwgrad_bwgrad'), + DecorateInfo(unittest.skip("See comment above"), + 'TestFwdGradients', + 'test_fn_fwgrad_bwgrad', + dtypes=[torch.complex128]), + ], + skips=( + # test does not work with passing lambda for op + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 + DecorateInfo(unittest.expectedFailure, 'TestSchemaCheckModeOpInfo', 'test_schema_correctness', + dtypes=(torch.complex64, torch.complex128)), + DecorateInfo(slowTest, 'TestCompositeCompliance', 'test_forward_ad'), + )), + OpInfo('pca_lowrank', + op=lambda *args, **kwargs: wrapper_set_seed( + lambda a, b, **kwargs: torch.pca_lowrank(a @ b.mT, **kwargs), + *args, **kwargs + ), + dtypes=floating_and_complex_types(), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + check_batched_forward_grad=False, + check_batched_grad=False, + check_batched_gradgrad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_pca_lowrank, + decorators=[skipCUDAIfNoCusolver, skipCPUIfNoLapack, with_tf32_off, + DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-03, rtol=1e-03), + torch.complex64: tol(atol=4e-02, rtol=4e-02)}), + 'TestCommon', 'test_noncontiguous_samples'), + DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-05, rtol=5e-05)}), + 'TestOperators', 'test_grad'), + # FIXME This should be the following, but the toleranceOverride does not seem to do anything! 
+ # DecorateInfo(toleranceOverride({torch.complex128: tol(atol=1e-04, rtol=1e-04)}), + # 'TestFwdGradients', 'test_fn_fwgrad_bwgrad'), + DecorateInfo(unittest.skip("See comment above"), + 'TestFwdGradients', + 'test_fn_fwgrad_bwgrad', + dtypes=[torch.complex128]), + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=3e-5, rtol=1e-3)}), + 'TestInductorOpInfo', 'test_comprehensive', device_type='cuda'), + ], + skips=( + # test does not work with passing lambda for op + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 + DecorateInfo(unittest.expectedFailure, 'TestSchemaCheckModeOpInfo', 'test_schema_correctness', + dtypes=(torch.complex64, torch.complex128)), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + )), + BinaryUfuncInfo('polar', + dtypes=floating_types(), + # this function is undefined if 'abs' values are <0 + supports_forward_ad=True, + lhs_make_tensor_kwargs=dict(low=0), + supports_rhs_python_scalar=False, + skips=( + # RuntimeError: Expected object of scalar type Float but got scalar type Double for second argument + DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs', 'test_type_promotion'), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_binary_ufuncs_mixed_dtype'), + # GradcheckError: Jacobian computed with forward mode mismatch for output 0 with respect to input 0 + # Numerical: + # tensor([[0.]], dtype=torch.float64) + # Analytical: + # tensor([[-0.0047]], dtype=torch.float64, grad_fn=) + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_fn_fwgrad_bwgrad'), + )), + # TODO(@kshitij12345): Refactor similar to `mvlgamma` entries. 
+ # To test reference numerics against multiple values of argument `n`, + # we make multiple OpInfo entries with each entry corresponding to different value of n (currently 0 to 4). + # We run the op tests from test_ops.py only for `n=0` to avoid redundancy in testing. + UnaryUfuncInfo('polygamma', + op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs), + variant_test_name='polygamma_n_0', + ref=reference_polygamma if TEST_SCIPY else None, + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + sample_inputs_func=sample_inputs_polygamma, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + ), + sample_kwargs=lambda device, dtype, input: ({'n': 0}, {'n': 0}), + # polygamma functions have multiple singularities at x having non-positive integer value + reference_numerics_filter=NumericsFilter(condition=lambda x: (x < 0.1) & ((x - x.round()).abs() < 1e-4), + safe_val=1)), + *(UnaryUfuncInfo('polygamma', + op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs), + variant_test_name=f'polygamma_n_{n_}', + ref=reference_polygamma if TEST_SCIPY else None, + dtypes=all_types_and(torch.bool, torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + sample_inputs_func=sample_inputs_polygamma, + decorators=( + DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-4, rtol=1e-3)}), 'TestUnaryUfuncs'), + DecorateInfo(toleranceOverride({torch.bfloat16: tol(atol=1e1, rtol=1e-1), + torch.float32: tol(atol=1e-4, rtol=1e-2)}), + 'TestUnaryUfuncs', 'test_reference_numerics_normal', + active_if=IS_WINDOWS), + ), + skips=( + # Redundant tests + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients'), + 
DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients'), + DecorateInfo(unittest.skip("Skipped!"), 'TestJit'), + DecorateInfo(unittest.skip("Skipped!"), 'TestNormalizeOperators'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon'), + # Mismatch: https://github.com/pytorch/pytorch/issues/55357 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal'), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large'), + ), + sample_kwargs=lambda device, dtype, input: ({'n': n_}, {'n': n_}), + # polygamma functions have multiple singularities at x having non-positive integer value + reference_numerics_filter=NumericsFilter(condition=lambda x: (x < 0.1) & ((x - x.round()).abs() < 1e-4), + safe_val=1)) + for n_ in (1, 2, 3, 4)), + OpInfo('ravel', + ref=np.ravel, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_ravel, + ), + OpInfo('unravel_index', + ref=np.unravel_index, + dtypes=integral_types_and(), + supports_out=False, + supports_autograd=False, + sample_inputs_func=sample_inputs_unravel_index, + ), + OpInfo('reshape', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + sample_inputs_func=sample_inputs_view_reshape, + reference_inputs_func=reference_inputs_view_reshape, + error_inputs_func=error_inputs_view_reshape, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + ), + OpInfo('reshape_as', + op=lambda x, other: x.reshape_as(other), + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + sample_inputs_func=partial(sample_inputs_view_reshape, tensor_arg=True), + reference_inputs_func=partial(reference_inputs_view_reshape, 
tensor_arg=True), + error_inputs_func=partial(error_inputs_view_reshape, tensor_arg=True), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + )), + OpInfo('view', + op=lambda x, shape: x.view(shape), + dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_jit_shape_analysis=True, + sample_inputs_func=sample_inputs_view_reshape, + reference_inputs_func=reference_inputs_view_reshape, + error_inputs_func=error_inputs_view_reshape, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # RuntimeError: view size is not compatible with input tensor's size and stride + # (at least one dimension spans across two contiguous subspaces). Use .reshape(...) instead. 
+ DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides"), + )), + OpInfo('view_as', + op=lambda x, other: x.view_as(other), + dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=partial(sample_inputs_view_reshape, tensor_arg=True), + reference_inputs_func=partial(reference_inputs_view_reshape, tensor_arg=True), + error_inputs_func=partial(error_inputs_view_reshape, tensor_arg=True), + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # RuntimeError: view size is not compatible with input tensor's size and stride + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides") + )), + OpInfo('atleast_1d', + dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_atleast1d2d3d, + skips=( + # JIT does not support variadic tensors. + # RuntimeError: input->type()->kind() == TypeKind::OptionalType + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":252, + # please report a bug to PyTorch. 
+ DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.float32]), + ), + ), + OpInfo('atleast_2d', + dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.float32]), + ), + sample_inputs_func=sample_inputs_atleast1d2d3d, + ), + OpInfo('atleast_3d', + dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.float32]), + ), + sample_inputs_func=sample_inputs_atleast1d2d3d, + ), + OpInfo('flatten', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + ref=reference_flatten, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_flatten, + reference_inputs_func=reference_inputs_flatten, + ), + 
OpInfo('unflatten', + op=torch.unflatten, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_unflatten, + ), + OpInfo('column_stack', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_column_stack,), + OpInfo('pinverse', + op=torch.pinverse, + dtypes=floating_and_complex_types(), + check_batched_grad=False, + check_batched_gradgrad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + supports_out=False, + sample_inputs_func=sample_inputs_linalg_invertible, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager', + device_type='mps', dtypes=[torch.float32]), + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', + device_type='mps', dtypes=[torch.float32]), + )), + OpInfo('gather', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_gather, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + error_inputs_func=error_inputs_gather, + ), + OpInfo('index_fill', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.complex32), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + skips=( + # RuntimeError: Mismatch on aten._unique.default: Shapes 
torch.Size([2]) and torch.Size([1]) are not equal! + DecorateInfo(unittest.expectedFailure, 'TestFakeTensor', 'test_fake_crossref_backward_no_amp'), + # RuntimeError: Mismatch on aten._unique.default: Shapes torch.Size([2]) and torch.Size([1]) are not equal! + DecorateInfo(unittest.expectedFailure, 'TestFakeTensor', 'test_fake_crossref_backward_amp'), + ), + sample_inputs_func=sample_inputs_index, + reference_inputs_func=partial(sample_inputs_index, reference=True)), + OpInfo('index_copy', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.complex32), + supports_out=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_index, + reference_inputs_func=partial(sample_inputs_index, reference=True), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL), + OpInfo('index_select', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + backward_dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16, torch.chalf), + sample_inputs_func=sample_inputs_index, + reference_inputs_func=partial(sample_inputs_index, reference=True), + error_inputs_func=error_inputs_index_select, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_jit_shape_analysis=True, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL), + OpInfo('index_add', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + supports_out=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_index, + reference_inputs_func=partial(sample_inputs_index, reference=True), + error_inputs_func=error_inputs_index_add, + skips=( + # boolean alpha not handled properly + DecorateInfo(unittest.expectedFailure, + 'TestNNCOpInfo', + 
'test_nnc_correctness', + dtypes=(torch.bool,)), + ), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL), + *(OpInfo('index_reduce', + variant_test_name=reduction_type, + dtypes=all_types_and(torch.float16, torch.bfloat16), + skips=( + DecorateInfo(toleranceOverride({torch.float16: tol(atol=2e-3, rtol=3e-3)}), + 'TestInductorOpInfo', 'test_comprehensive'), + ), + supports_out=True, + sample_inputs_func=sample_inputs_index_reduce, + ) for reduction_type in ('mean', 'prod', 'amin', 'amax')), + OpInfo('_unsafe_masked_index', + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16, torch.bool), + supports_out=False, + supports_inplace_autograd=False, + supports_scripting=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs__unsafe_masked_index, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'), + DecorateInfo(slowTest, 'TestDecomp', 'test_quick_core_backward', + dtypes=(torch.float64,), active_if=IS_WINDOWS), + ),), + OpInfo('_unsafe_masked_index_put_accumulate', + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16, torch.bool), + supports_out=False, + supports_inplace_autograd=False, + supports_scripting=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=( + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=2e-3, rtol=3e-2)}), + 'TestInductorOpInfo', 'test_comprehensive', device_type='cpu' + ), + ), + sample_inputs_func=sample_inputs__unsafe_masked_index_put_accumulate, + skips=( + DecorateInfo(slowTest, 'TestDecomp', 'test_quick_core_backward', + dtypes=(torch.float64,), active_if=IS_WINDOWS), + ),), + OpInfo('__getitem__', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + 
supports_inplace_autograd=False, + supports_scripting=False, + op=torch.Tensor.__getitem__, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # AssertionError: False is not true : Scalars failed to compare as equal! 0 != 104448 + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', device_type='cuda'),), + sample_inputs_func=sample_inputs_getitem), + OpInfo('index_put', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + supports_out=False, + supports_inplace_autograd=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + test_neg_view=False, + sample_inputs_func=sample_inputs_index_put, + skips=( + DecorateInfo(unittest.skip("Skipped"), 'TestBwdGradients', 'test_fn_grad', dtypes=[torch.float64], + device_type='cuda', active_if=(TEST_WITH_ROCM and TEST_WITH_TORCHINDUCTOR)), + )), + OpInfo('sort', + dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_sort, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + )), + OpInfo('unique', + dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16, torch.uint16, torch.uint32, torch.uint64), + dtypesIfCUDA=all_types_and(torch.bool, torch.float16, torch.uint16, torch.uint32, torch.uint64), + sample_inputs_func=sample_inputs_unique, + supports_out=False, + supports_autograd=False, + skips=( + # lambda impl + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip('Output order is undefined when sorted=False'), 'TestCommon', 'test_compare_cpu'), + )), + OpInfo('unique_consecutive', + 
dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.bool, torch.float16), + sample_inputs_func=sample_inputs_unique_consecutive, + supports_out=False, + supports_autograd=False, + skips=( + # lambda impl + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + )), + OpInfo('put', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_forward_grad=False, + check_batched_gradgrad=False, # vmap complains of the sizes + sample_inputs_func=sample_inputs_put), + OpInfo('take', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + check_batched_grad=False, # vmap complains of the sizes + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_take, + error_inputs_func=error_inputs_take), + OpInfo('scatter', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_scatter, + error_inputs_func=error_inputs_scatter_and_scatter_add), + UnaryUfuncInfo( + 'bfloat16', + op=lambda x, *args, **kwargs: x.bfloat16(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + sample_inputs_func=sample_inputs_conversion, + skips=( + # autograd tests don't handle operators that change dtype + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients'), + DecorateInfo(unittest.expectedFailure, 'TestBwdGradients'), + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # RuntimeError: attribute lookup is not defined on builtin + DecorateInfo(unittest.expectedFailure, 'TestJit', 
'test_variant_consistency_jit'), + DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'), + )), + UnaryUfuncInfo( + 'bool', + op=lambda x, *args, **kwargs: x.bool(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + sample_inputs_func=sample_inputs_conversion, + supports_autograd=False, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # RuntimeError: attributis not defined on builtin + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + )), + UnaryUfuncInfo( + 'byte', + op=lambda x, *args, **kwargs: x.byte(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + supports_out=False, + sample_inputs_func=sample_inputs_byte, + # The autograd test runner cannot handle functions that change dtype + supports_autograd=False, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # RuntimeError: attribute lookup is not defined on builtin + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip('Overflow when downcasting signed type is undefined'), 'TestCommon', 'test_compare_cpu'), + )), + UnaryUfuncInfo( + 'char', + op=lambda x, *args, **kwargs: x.char(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + sample_inputs_func=sample_inputs_conversion, + # The autograd test runner cannot handle functions that change dtype + supports_autograd=False, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # RuntimeError: attribute lookup is not defined on builtin + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + 
DecorateInfo(unittest.skip('Overflow when downcasting signed type is undefined'), 'TestCommon', 'test_compare_cpu'), + )), + UnaryUfuncInfo( + 'double', + op=lambda x, *args, **kwargs: x.double(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + sample_inputs_func=sample_inputs_conversion, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # RuntimeError: attribute lookup is not defined on builtin + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + )), + UnaryUfuncInfo( + 'float', + op=lambda x, *args, **kwargs: x.float(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + sample_inputs_func=sample_inputs_conversion, + skips=( + # autograd tests don't handle operators that change dtype + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients'), + DecorateInfo(unittest.expectedFailure, 'TestBwdGradients'), + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # RuntimeError: attribute lookup is not defined on builtin + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + )), + UnaryUfuncInfo( + 'half', + op=lambda x, *args, **kwargs: x.half(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + supports_out=False, + sample_inputs_func=sample_inputs_conversion, + supports_autograd=True, + skips=( + # autograd tests don't handle operators that change dtype + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients'), + DecorateInfo(unittest.expectedFailure, 'TestBwdGradients'), + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # RuntimeError: attribute lookup is not defined 
on builtin + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + )), + UnaryUfuncInfo( + 'int', + op=lambda x, *args, **kwargs: x.int(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + supports_out=False, + sample_inputs_func=sample_inputs_conversion, + supports_autograd=False, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # RuntimeError: attribute lookup is not defined on builtin + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip('Overflow when downcasting signed type is undefined'), 'TestCommon', 'test_compare_cpu'), + )), + UnaryUfuncInfo( + 'long', + op=lambda x, *args, **kwargs: x.long(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + sample_inputs_func=sample_inputs_conversion, + supports_autograd=False, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # RuntimeError: attribute lookup is not defined on builtin + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip('Overflow when downcasting signed type is undefined'), 'TestCommon', 'test_compare_cpu'), + )), + UnaryUfuncInfo( + 'short', + op=lambda x, *args, **kwargs: x.short(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + supports_out=False, + sample_inputs_func=sample_inputs_conversion, + supports_autograd=False, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # RuntimeError: attribute lookup is not defined on builtin + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip('Overflow when downcasting signed type is 
undefined'), 'TestCommon', 'test_compare_cpu'), + )), + UnaryUfuncInfo( + 'cdouble', + op=torch.Tensor.cdouble, + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + sample_inputs_func=sample_inputs_conversion, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # RuntimeError: attribute lookup is not defined on builtin + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'), + )), + UnaryUfuncInfo( + 'cfloat', + op=torch.Tensor.cfloat, + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + sample_inputs_func=sample_inputs_conversion, + skips=( + # autograd tests don't handle operators that change dtype + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients'), + DecorateInfo(unittest.expectedFailure, 'TestBwdGradients'), + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # RuntimeError: attribute lookup is not defined on builtin + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'), + )), + UnaryUfuncInfo( + 'chalf', + op=lambda x, *args, **kwargs: x.chalf(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + sample_inputs_func=sample_inputs_conversion, + skips=( + # autograd tests don't handle operators that change dtype + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients'), + DecorateInfo(unittest.expectedFailure, 'TestBwdGradients'), + # use of lambda doesn't work with test_normalize_operator_exhaustive + DecorateInfo(unittest.expectedFailure, 
'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # RuntimeError: "sum_cpu" not implemented for 'ComplexHalf' + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager', + device_type='cpu'), + # TypeError: 'int' object is not iterable + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # RuntimeError: "sum_cpu" not implemented for 'ComplexHalf' + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view', + device_type='cpu'), + # RuntimeError: "sum_cpu" not implemented for 'ComplexHalf' + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view', + device_type='cpu'), + # RuntimeError: "sum_cpu" not implemented for 'ComplexHalf' + # RuntimeError: "neg_conj_cuda" not implemented for 'ComplexHalf' + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + ) + ), + OpInfo('empty_like', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + sample_inputs_func=sample_inputs_like_fns, + reference_inputs_func=reference_inputs_like_fns, + supports_autograd=False, + skips=( + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), + "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'), + # Empty tensor data is garbage so it's hard to make comparisons with it. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_complex_half_reference_testing'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_non_standard_bool_values'), + DecorateInfo(unittest.skip("Expected: empty_like is not comparable"), 'TestCompositeCompliance', + 'test_operator'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + )), + OpInfo('zeros_like', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + sample_inputs_func=sample_inputs_like_fns, + supports_autograd=False, + error_inputs_sparse_func=error_inputs_sparse_like_fns, + sample_inputs_sparse_coo_func=partial(sample_inputs_sparse_like_fns, layout=torch.sparse_coo), + sample_inputs_sparse_csr_func=partial(sample_inputs_sparse_like_fns, layout=torch.sparse_csr), + sample_inputs_sparse_csc_func=partial(sample_inputs_sparse_like_fns, layout=torch.sparse_csc), + sample_inputs_sparse_bsr_func=partial(sample_inputs_sparse_like_fns, layout=torch.sparse_bsr), + sample_inputs_sparse_bsc_func=partial(sample_inputs_sparse_like_fns, layout=torch.sparse_bsc), + skips=( + )), + OpInfo('ones_like', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + 
sample_inputs_func=sample_inputs_like_fns, + supports_autograd=False, + skips=( + )), + OpInfo('randn', + dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16, torch.complex32), + op=lambda *args, **kwargs: wrapper_set_seed(torch.randn, *args, **kwargs), + supports_out=True, + sample_inputs_func=sample_inputs_randn, + supports_autograd=False, + skips=( + # Tests that assume input is a tensor or sequence of tensors + DecorateInfo(unittest.skip("Test expects tensor input"), "TestCommon", "test_noncontiguous_samples"), + DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_vmap_exhaustive"), + DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_op_has_batch_rule"), + # CPU randn generates different values based on the strides of out tensor + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out', device_type='cpu'), + # randn fails to warn when resizing its out tensor + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + # FX failed to normalize op - add the op to the op_skip list. 
+ DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # Tests that assume input tensor has a meaningful effect on output tensor + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_quick'), + )), + OpInfo('randn_like', + dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16, torch.complex32), + op=lambda inp, *args, **kwargs: + wrapper_set_seed(torch.randn_like, inp, *args, **kwargs), + supports_out=False, + sample_inputs_func=sample_inputs_like_fns, + supports_autograd=False, + error_inputs_sparse_func=error_inputs_sparse_like_fns, + sample_inputs_sparse_coo_func=partial(sample_inputs_sparse_like_fns, layout=torch.sparse_coo), + sample_inputs_sparse_csr_func=partial(sample_inputs_sparse_like_fns, layout=torch.sparse_csr), + sample_inputs_sparse_csc_func=partial(sample_inputs_sparse_like_fns, layout=torch.sparse_csc), + sample_inputs_sparse_bsr_func=partial(sample_inputs_sparse_like_fns, layout=torch.sparse_bsr), + sample_inputs_sparse_bsc_func=partial(sample_inputs_sparse_like_fns, layout=torch.sparse_bsc), + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip("Expected: randn_like is not comparable between dtypes"), + 'TestCommon', 'test_complex_half_reference_testing'), + DecorateInfo(unittest.skip('output is 
non-deterministic'), 'TestCommon', 'test_compare_cpu'), + )), + OpInfo('rand_like', + dtypes=floating_types_and(torch.half, torch.bfloat16, torch.complex32, torch.complex64, torch.complex128), + op=lambda inp, *args, **kwargs: + wrapper_set_seed(torch.randn_like, inp, *args, **kwargs), + supports_out=False, + sample_inputs_func=sample_inputs_like_fns, + supports_autograd=False, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip("Expected: randn_like is not comparable between dtypes"), + 'TestCommon', 'test_complex_half_reference_testing'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + )), + OpInfo('randint', + dtypes=all_types_and(torch.half, torch.bfloat16), + op=lambda *args, **kwargs: + wrapper_set_seed(torch.randint, *args, **kwargs), + supports_out=False, + sample_inputs_func=sample_inputs_randint, + supports_autograd=False, + skips=( + # Tests that assume input is a tensor or sequence of tensors + DecorateInfo(unittest.skip("Test expects tensor input"), "TestCommon", "test_noncontiguous_samples"), + DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_vmap_exhaustive"), + DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_op_has_batch_rule"), + # CPU randint generates different values based on the strides of out tensor + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + # randint fails to warn when resizing its out tensor + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + # FX failed to normalize op - add the op to the op_skip list. 
+ DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # Tests that assume input tensor has a meaningful effect on output tensor + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # Might need to skip until ROCm5.5 + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_multiple_devices', + dtypes=[torch.float32, torch.int64], active_if=TEST_WITH_ROCM), + )), + OpInfo('randint_like', + dtypes=all_types_and(torch.half, torch.bfloat16), + op=lambda inp, *args, **kwargs: + wrapper_set_seed(torch.randint_like, inp, *args, **kwargs), + supports_out=False, + sample_inputs_func=sample_inputs_randint_like, + supports_autograd=False, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + )), + OpInfo('full_like', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + supports_out=False, + sample_inputs_func=sample_inputs_full_like, + supports_autograd=False, + skips=( + )), + OpInfo('new_zeros', + op=lambda x, *args, **kwargs: x.new_zeros(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + sample_inputs_func=sample_inputs_new_fns, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", 
"test_normalize_operator_exhaustive"), + ), + supports_autograd=False), + OpInfo('new_ones', + op=lambda x, *args, **kwargs: x.new_ones(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + sample_inputs_func=sample_inputs_new_fns, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + ), + supports_autograd=False), + OpInfo('ones', + op=torch.ones, + supports_autograd=False, + supports_varargs=True, + is_factory_function=True, + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=True, + sample_inputs_func=sample_inputs_ones_zeros, + skips=( + # Tests that assume input is a tensor or sequence of tensors + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + + # Same failure as arange: cannot find linspace in captured graph + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + + # UserWarning not triggered : Resized a non-empty tensor but did not warn about it. 
+ DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + )), + OpInfo('zeros', + op=torch.zeros, + supports_autograd=False, + is_factory_function=True, + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=True, + sample_inputs_func=sample_inputs_ones_zeros, + skips=( + # Tests that assume input is a tensor or sequence of tensors + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + + # Same failure as arange: cannot find linspace in captured graph + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + + # UserWarning not triggered : Resized a non-empty tensor but did not warn about it. + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + )), + OpInfo('full', + op=torch.full, + supports_autograd=False, + is_factory_function=True, + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=True, + sample_inputs_func=sample_inputs_full, + skips=( + # Tests that assume input is a tensor or sequence of tensors + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + # Same failure as arange: cannot find linspace in captured graph + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # UserWarning not triggered : Resized a non-empty tensor but did not warn about it. 
+ DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + # RuntimeError: UNSUPPORTED DTYPE: bool + DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness', dtypes=(torch.bool,)), + )), + OpInfo('new_empty', + op=lambda x, *args, **kwargs: x.new_empty(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + sample_inputs_func=sample_inputs_new_fns, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'), + # Empty tensor data is garbage so it's hard to make comparisons with it. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_non_standard_bool_values'), + DecorateInfo(unittest.skip("Expected: new_empty is not comparable"), 'TestCompositeCompliance', + 'test_operator'), + DecorateInfo(unittest.skip("Expected: new_empty is not comparable"), + 'TestCommon', 'test_complex_half_reference_testing'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + ), + supports_autograd=False), + OpInfo('new_empty_strided', + op=lambda x, *args, **kwargs: x.new_empty_strided(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + sample_inputs_func=partial(sample_inputs_new_fns, is_strided=True), + supports_autograd=False, + skips=( + # FX failed to normalize op + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # Lazy tensor failures + DecorateInfo(unittest.skip("Skipped!"), 'TestLazyOpInfo', 'test_correctness'), + DecorateInfo(unittest.skip("Skipped!"), 'TestLazyOpInfo', 'test_correctness_with_reusing_ir'), + # Empty tensor data is garbage so it's hard to make comparisons with it. 
+ DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), + 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), + 'TestCommon', 'test_noncontiguous_samples'), + DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), + 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), + 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), + 'TestMathBits', 'test_neg_conj_view'), + DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), + 'TestCommon', 'test_non_standard_bool_values'), + DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), + 'TestCommon', 'test_complex_half_reference_testing'), + DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), + 'TestCompositeCompliance', 'test_operator'), + DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), + 'TestDecomp', 'test_comprehensive'), + DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), + 'TestDecomp', 'test_quick'), + DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), + 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), + 'TestProxyTensorOpInfo', 'test_make_fx_exhaustive'), + DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), + 'TestProxyTensorOpInfo', 'test_make_fx_fake_exhaustive'), + DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), + 'TestProxyTensorOpInfo', 'test_make_fx_symbolic_exhaustive'), + DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), + 'TestNNCOpInfo', 'test_nnc_correctness'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + )), + 
OpInfo('empty_strided', + op=lambda inp, *args, **kwargs: wrapper_set_seed(torch.empty_strided, inp, *args, **kwargs), + dtypes=all_types_and_complex_and(torch.bfloat16, torch.bool, torch.half), + supports_out=False, + supports_autograd=False, + sample_inputs_func=sample_inputs_empty_strided, + skips=( + # FX failed to normalize op - add the op to the op_skip list. + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_non_standard_bool_values'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_compare_cpu'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestCompositeCompliance', 'test_operator'), + # Lazy tensor failures + DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestLazyOpInfo'), + # RuntimeError: unsupported operation: more than one element of the written-to tensor refers to a single + # memory location. Please clone() the tensor before performing the operation. 
+ DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_meta_outplace'), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_symbolic_meta_outplace'), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_symbolic_meta_outplace_all_strides'), + )), + OpInfo('empty', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + sample_inputs_func=sample_inputs_empty, + supports_autograd=False, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'), + # Empty tensor data is garbage so it's hard to make comparisons with it. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_non_standard_bool_values'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestCompositeCompliance', + 'test_operator'), + # requires_grad doesn't exist in the jit schema + DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestCommon', + 'test_out'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestCommon', + 'test_out_warning'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestLazyOpInfo'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestCommon', 'test_complex_half_reference_testing'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + )), + OpInfo('eye', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_eye, + error_inputs_func=error_inputs_eye, + supports_out=True, + supports_autograd=False, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # TODO: same as this? 
+ # https://github.com/pytorch/pytorch/issues/81774 + # also see: arange, new_full + # fails to match any schemas despite working in the interpreter + DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), + # fails to match any schemas despite working in the interpreter + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # skip these tests since we have non tensor input + DecorateInfo(unittest.skip('Skipped!'), "TestCommon", "test_noncontiguous_samples"), + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'), + # UserWarning not triggered : Resized a non-empty tensor but did not warn about it. + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + )), + OpInfo('empty_permuted', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + sample_inputs_func=sample_inputs_empty_permuted, + error_inputs_func=error_inputs_empty_permuted, + supports_out=False, + supports_autograd=False, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples'), + # Empty tensor data is garbage so it's hard to make comparisons with it. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_non_standard_bool_values'), + DecorateInfo(unittest.skip("Expected: empty_permuted is not comparable"), 'TestCompositeCompliance', + 'test_operator'), + # requires_grad doesn't exist in the jit schema + DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), + DecorateInfo(unittest.skip("Expected: empty_permuted is not comparable"), + 'TestCommon', + 'test_out'), + DecorateInfo(unittest.skip("Expected: empty_permuted is not comparable"), + 'TestCommon', + 'test_out_warning'), + DecorateInfo(unittest.skip("Expected: empty_permuted is not comparable"), + 'TestLazyOpInfo'), + DecorateInfo(unittest.skip("Expected: empty_permuted is not comparable"), + 'TestCommon', 'test_complex_half_reference_testing'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + )), + OpInfo('scalar_tensor', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + sample_inputs_func=sample_inputs_scalar_tensor, + supports_autograd=False, + supports_out=False, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 
'test_normalize_operator_exhaustive'), + # fails to match any schemas despite working in the interpreter + DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), + # fails to match any schemas despite working in the interpreter + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # skip these tests since we have non tensor input + DecorateInfo(unittest.skip('Skipped!'), "TestCommon", "test_noncontiguous_samples"), + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'), + )), + OpInfo('new_full', + op=lambda x, *args, **kwargs: x.new_full(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + sample_inputs_func=sample_inputs_new_full, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + ), + supports_autograd=False), + OpInfo('multinomial', + op=lambda inp, *args, **kwargs: + wrapper_set_seed(torch.multinomial, inp, *args, **kwargs), + method_variant=lambda inp, *args, **kwargs: + wrapper_set_seed(torch.Tensor.multinomial, inp, *args, **kwargs), + dtypes=floating_types_and(torch.bfloat16, torch.half), + supports_out=True, + sample_inputs_func=sample_inputs_multinomial, + error_inputs_func=error_inputs_multinomial, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # Strides are not the same! 
+ # This may not be reproducible in CI + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out'), + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # UserWarning not triggered : Resized a non-empty tensor but did not warn about it. + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu')), + supports_autograd=False), + OpInfo('normal', + op=lambda inp, *args, **kwargs: + wrapper_set_seed(torch.normal, inp, *args, **kwargs), + # The inplace variant (Tensor.normal_) is different from torch.normal + inplace_variant=None, + dtypes=floating_types_and(torch.bfloat16, torch.half), + dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.half), + supports_out=True, + sample_inputs_func=sample_inputs_normal_tensor_first, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # Tensor-likes are not close! + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # UserWarning not triggered : Resized a non-empty tensor but did not warn about it. 
+ DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + # Computed gradient is incorrect -- would be an exfail but gradgrad somehow passes + DecorateInfo(unittest.skip("Gradients are incorrect!"), 'TestFwdGradients'), + DecorateInfo(unittest.skip("Gradients are incorrect!"), 'TestBwdGradients'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + # RuntimeError: Difference from {dtype} is larger with decomposition + DecorateInfo(unittest.skip("Skipped!"), 'TestDecomp', 'test_comprehensive'), + DecorateInfo(unittest.skip("Skipped!"), 'TestDecomp', 'test_quick'), + # The inplace variant (Tensor.normal_) is different from torch.normal + # inplace varaint Tensor.normal_ is decomposed using randn_like() + DecorateInfo(unittest.skip("Skipped!"), 'TestMeta', 'test_dispatch_symbolic_meta_outplace_all_strides'))), + OpInfo('normal', + # This has its own variant b/c OpInfos assume the first arg is a Tensor but it is not here + variant_test_name='number_mean', + op=lambda std, mean, *args, **kwargs: + wrapper_set_seed(torch.normal, mean, std, *args, **kwargs), + # The inplace variant (Tensor.normal_) is different from torch.normal + inplace_variant=None, + dtypes=floating_types_and(torch.bfloat16, torch.half), + dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.half), + supports_out=True, + sample_inputs_func=sample_inputs_normal_tensor_second, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 
'test_out_warning'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_backward'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients'), + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_compare_cpu'), + DecorateInfo(unittest.skip("Skipped!"), 'TestEagerFusionOpInfo'), + DecorateInfo(unittest.skip("Skipped!"), 'TestOperators'), + # AssertionError + DecorateInfo(unittest.skip("Skipped!"), 'TestDecomp', 'test_comprehensive'), + # AssertionError + DecorateInfo(unittest.skip("Skipped!"), 'TestDecomp', 'test_quick'), + # AssertionError in CUDA variant + DecorateInfo(unittest.skip("Skipped!"), 'TestFakeTensor', device_type='cuda'), + DecorateInfo(unittest.skip("Skipped!"), 'TestDeviceUtils', 'test_device_mode_ops'))), + OpInfo('bernoulli', + op=lambda inp, *args, **kwargs: + wrapper_set_seed(torch.bernoulli, inp, *args, **kwargs), + # The inplace variant (Tensor.bernoulli_) is different from torch.bernoulli + inplace_variant=None, + method_variant=lambda inp, *args, **kwargs: + wrapper_set_seed(torch.Tensor.bernoulli, inp, *args, **kwargs), + dtypes=floating_types_and(torch.bfloat16, torch.half), + supports_out=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_bernoulli, + error_inputs_func=error_inputs_bernoulli, + skips=( + # vmap: We do not yet support calling random operations inside of vmap + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_forward_mode_AD'), + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # Expected RuntimeError when doing an unsafe cast from a result of + # dtype torch.float32 into an out= with dtype 
torch.lon + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + # UserWarning not triggered : Resized a non-empty tensor but did not warn about it. + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'))), + OpInfo('scatter_add', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_scatter_add, + error_inputs_func=error_inputs_scatter_and_scatter_add, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + ), + OpInfo('stack', + dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_stack, + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # https://github.com/pytorch/pytorch/issues/77046 + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + ), + ), + OpInfo('_chunk_cat', + dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_chunk_cat, + error_inputs_func=error_inputs_chunk_cat, + supports_autograd=False, + supports_out=True, + ), + OpInfo('hstack', + dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_hstack_dstack_vstack, + error_inputs_func=error_inputs_hstack_dstack_vstack, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + ), + BinaryUfuncInfo('hypot', + dtypes=floating_types_and(torch.bfloat16, torch.half), + dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_rhs_python_scalar=False), + OpInfo('histogram', + dtypes=floating_types(), + dtypesIfCUDA=_dispatch_dtypes(), # histogram is only implemented on CPU + 
sample_inputs_func=sample_inputs_histogram, + supports_autograd=False, + skips=( + # JIT tests don't work with Tensor keyword arguments + # https://github.com/pytorch/pytorch/issues/58507 + # RuntimeError: + # undefined value tensor: + # File "", line 3 + # def the_method(i0): + # return torch.histogram(i0, 1, weight=tensor(-0.5735, dtype=torch.float32), density=False) + # ~~~~~~ <--- HERE + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # Not Implemented on XLA. + DecorateInfo(unittest.skip("Skipped!"), 'TestOpInfo', device_type='xla'), + )), + OpInfo('histogramdd', + dtypes=floating_types(), + dtypesIfCUDA=_dispatch_dtypes(), # histogramdd is only implemented on CPU + sample_inputs_func=sample_inputs_histogramdd, + error_inputs_func=error_inputs_histogramdd, + supports_autograd=False, + skips=( + # Not implemented on CUDA + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_errors', device_type='cuda'), + # JIT tests don't work with Tensor keyword arguments + # https://github.com/pytorch/pytorch/issues/58507 + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + )), + OpInfo('histc', + dtypes=floating_types_and(torch.bfloat16, torch.float16), + dtypesIfCUDA=floating_types_and(torch.int8, torch.int16, torch.int32, torch.int64), + sample_inputs_func=sample_inputs_histc, + supports_out=True, + supports_autograd=False, + skips=( + # CUDA histc returns a float tensor but does not correctly warn when passed an integral out tensor + # "AssertionError: RuntimeError not raised : Expected RuntimeError when doing an unsafe cast + # from a result of dtype torch.float32 into an out= with dtype torch.long" + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out', device_type='cuda'), + )), + OpInfo('bincount', + dtypes=integral_types_and(), + sample_inputs_func=sample_inputs_bincount, + supports_out=False, + supports_autograd=False, + skips=( + # JIT tests don't work with Tensor keyword 
arguments + # https://github.com/pytorch/pytorch/issues/58507 + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + )), + OpInfo('bucketize', + dtypes=all_types_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.bfloat16, torch.float16), + sample_inputs_func=sample_inputs_bucketize, + reference_inputs_func=reference_inputs_bucketize, + error_inputs_func=error_inputs_bucketize, + supports_autograd=False, + skips=( + # JIT tests don't work with Tensor keyword arguments + DecorateInfo(unittest.skip("Expected failure!"), 'TestJit', 'test_variant_consistency_jit'), + )), + OpInfo('searchsorted', + dtypes=all_types_and(torch.bfloat16, torch.float16), + dtypesIfCUDA=all_types_and(torch.bfloat16, torch.float16), + sample_inputs_func=sample_inputs_searchsorted, + supports_autograd=False, + ref=reference_searchsorted, + skips=( + # JIT tests don't work with Tensor keyword arguments + # https://github.com/pytorch/pytorch/issues/58507 + DecorateInfo(unittest.skip("Expected failure!"), 'TestJit', 'test_variant_consistency_jit'), + )), + OpInfo('cat', + ref=_cat_np, + aliases=('concat', 'concatenate'), + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.complex32), + sample_inputs_func=sample_inputs_cat_concat, + reference_inputs_func=reference_inputs_cat, + error_inputs_func=error_inputs_cat, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + assert_autodiffed=True, + skips=( + # https://github.com/pytorch/pytorch/issues/89353 + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_numpy_ref_mps'), + # RuntimeError: Arguments for call not valid. + # Expected a value of type 'List[Tensor]' for argument + # 'tensors' but instead found type 'Tensor (inferred)'. 
+ DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping'), + # see https://github.com/pytorch/pytorch/issues/71286 + DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness'), + # see https://github.com/pytorch/pytorch/issues/99806 + # RuntimeError: The size of tensor a (25) must match the size of tensor b (0) at non-singleton dimension 0. + DecorateInfo(unittest.expectedFailure, 'TestBwdGradients', 'test_fn_gradgrad'), + )), + OpInfo('unbind', + dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), + ref=reference_unbind, + sample_inputs_func=sample_inputs_unbind, + error_inputs_func=error_inputs_unbind, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_gradgrad=True, + supports_out=False, + ), + OpInfo('vstack', + aliases=('row_stack',), + dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_hstack_dstack_vstack, + error_inputs_func=error_inputs_hstack_dstack_vstack, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # RuntimeError: _fn() Expected a value of type + # 'Tensor (inferred)' for argument 't0' but instead found type 'tuple'. 
+ DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping'),)), + OpInfo('dstack', + dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_hstack_dstack_vstack, + error_inputs_func=error_inputs_hstack_dstack_vstack, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + ), + OpInfo('unfold', + op=lambda x, *args: x.unfold(*args), + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + backward_dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_gradgrad=False, + # See https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # Skip operator schema test because this is a functional and not an operator + DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), + ), + sample_inputs_func=sample_inputs_unfold), + OpInfo('unfold_copy', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + backward_dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_gradgrad=False, + # See https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_unfold), + OpInfo('msort', + dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16), + 
dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16), + check_batched_gradgrad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_msort, + skips=( + )), + OpInfo('movedim', + aliases=('moveaxis',), + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + sample_inputs_func=sample_movedim_moveaxis, + reference_inputs_func=reference_movedim_moveaxis, + error_inputs_func=error_movedim_moveaxis), + OpInfo('renorm', + dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_renorm, + error_inputs_func=error_inputs_renorm, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # RuntimeError: Difference from float64 is larger with decomposition + # linalg_vector_norm.default than original on output 0. 
+ # Original max diff: 2.560596747969157e-07, + # Decomp max diff: 1.8187482915266173e-06 + DecorateInfo(unittest.skip("Inconsistent accuracy"), 'TestDecomp', 'test_comprehensive', + device_type='cpu', dtypes=(torch.float16,)), + )), + ShapeFuncInfo('repeat', + op=lambda x, dims: x.repeat(dims), + ref=np.tile, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_repeat_tile, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + )), + OpInfo('squeeze', + ref=_squeeze_ref, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + supports_out=False, + assert_autodiffed=True, + autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused + autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused + assert_jit_shape_analysis=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # vmap does not support inplace views + check_inplace_batched_forward_grad=False, + # https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_squeeze), + OpInfo('squeeze', + ref=_squeeze_ref, + variant_test_name="multiple", + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + supports_out=False, + assert_autodiffed=True, + autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused + autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # vmap does not support inplace views + check_inplace_batched_forward_grad=False, + # https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_squeeze_multiple), + UnaryUfuncInfo( + 'fill', 
+ ref=_fill_np, + method_variant=None, + sample_kwargs=_fill_sample_kwargs, + sample_inputs_func=partial(sample_inputs_elementwise_unary, op_kwargs={'value': True}), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), + supports_out=False, + skips=( + # JIT has issue when op is passed as lambda + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip("No fill_ op"), 'TestCudaFuserOpInfo'), + DecorateInfo(unittest.skip("No fill_ op"), 'TestNNCOpInfo'), + )), + OpInfo('resize_', + op=lambda x, shape: x.clone().resize_(shape), + method_variant=None, + inplace_variant=torch.Tensor.resize_, + # the test fails because resize_ doesn't work with imag views as expected by the test + # https://github.com/pytorch/pytorch/issues/65945 + test_neg_view=False, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + supports_out=False, + supports_autograd=False, + skips=( + # Cannot resize variables that require grad + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_dtypes'), + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + DecorateInfo(unittest.skip("Allowed exception"), 'TestCompositeCompliance', 'test_operator'), + ), + sample_inputs_func=sample_inputs_resize_ops), + OpInfo('resize_as_', + op=lambda x, other: torch.resize_as_(x.clone(), other), + method_variant=None, + inplace_variant=torch.Tensor.resize_as_, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + supports_out=False, + supports_autograd=False, + skips=( + # Cannot resize variables that require grad + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_dtypes'), + 
DecorateInfo(unittest.skip('Allowed exemption'), 'TestCompositeCompliance', 'test_operator'), + ), + sample_inputs_func=sample_inputs_resize_ops), + OpInfo('take_along_dim', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + supports_inplace_autograd=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_take_along_dim, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + decorators=( + # RuntimeError: view size is not compatible with input tensor's size and stride + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides"), + )), + ShapeFuncInfo('tile', + ref=np.tile, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_repeat_tile), + OpInfo('trapz', # TODO: in the future, 'trapz' should be made a proper alias of 'trapezoid' + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + decorators=[ + DecorateInfo( + toleranceOverride({torch.half: tol(atol=9e-4, rtol=4.3e-3)}), + 'TestInductorOpInfo', 'test_comprehensive', device_type='cuda' + ), + ], + sample_inputs_func=sample_trapezoid), + OpInfo('trapezoid', + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + decorators=[ + DecorateInfo( + 
toleranceOverride({torch.half: tol(atol=9e-4, rtol=4.3e-3)}), + 'TestInductorOpInfo', 'test_comprehensive', device_type='cuda' + ), + ], + sample_inputs_func=sample_trapezoid), + OpInfo('cumulative_trapezoid', + dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + supports_out=False, + decorators=( + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=4e-3, rtol=4e-3)}), + 'TestInductorOpInfo', 'test_comprehensive', + ), + ), + sample_inputs_func=sample_cumulative_trapezoid,), + OpInfo('unsqueeze', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + # vmap does not support inplace views + check_inplace_batched_forward_grad=False, + assert_jit_shape_analysis=True, + assert_autodiffed=True, + autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused + autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused + sample_inputs_func=sample_unsqueeze), + OpInfo('unsqueeze_copy', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + supports_out=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + # vmap does not support inplace views + check_inplace_batched_forward_grad=False, + assert_jit_shape_analysis=True, + assert_autodiffed=True, + autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused + autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused + sample_inputs_func=sample_unsqueeze, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestDTensorOps', 'test_dtensor_op_db'), + DecorateInfo( + unittest.expectedFailure, 
+ 'TestJit', + 'test_variant_consistency_jit', + dtypes=(torch.float32,), + ), + )), + BinaryUfuncInfo('xlogy', + aliases=('special.xlogy',), + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + promotes_int_to_float=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_one_python_scalar=True, + # We don't test 0 as the gradient will be NaN and it'll break + rhs_make_tensor_kwargs=dict(low=0.01)), + OpInfo('zero_', + op=lambda x: torch.zero_(x.clone()), + method_variant=None, + inplace_variant=torch.Tensor.zero_, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_gradgrad=True, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + ), + sample_inputs_func=sample_inputs_zero_), + OpInfo('logsumexp', + aliases=('special.logsumexp',), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + gradcheck_fast_mode=False, + sample_inputs_func=sample_inputs_logsumexp, + reference_inputs_func=reference_inputs_logsumexp), + OpInfo('trace', + dtypes=all_types_and_complex(), + dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), + error_inputs_func=error_inputs_trace, + supports_inplace_autograd=False, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_trace), + OpInfo('transpose', + ref=_numpy_ref_transpose, + aliases=('swapdims', 'swapaxes'), + assert_jit_shape_analysis=True, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # vmap does not support inplace views + 
check_inplace_batched_forward_grad=False, + sample_inputs_func=sample_inputs_transpose_swapdims), + OpInfo('T', + op=lambda x: x.T, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # lambda impl + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + DecorateInfo(unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"),), + sample_inputs_func=sample_inputs_T, + error_inputs_func=error_inputs_T), + OpInfo('H', + op=lambda x: x.H, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + skips=( + # lambda impl + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + DecorateInfo(unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"),), + sample_inputs_func=sample_inputs_T), + OpInfo('mT', + op=lambda x: x.mT, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # lambda impl + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + DecorateInfo(unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"),), + sample_inputs_func=sample_inputs_adjoint), + OpInfo('mH', + op=lambda x: x.mH, + aliases=('adjoint',), + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + 
supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + skips=( + # lambda impl + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + DecorateInfo(unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"),), + sample_inputs_func=sample_inputs_adjoint), + OpInfo('tril', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + error_inputs_func=error_inputs_tril_triu, + sample_inputs_func=sample_inputs_tril_triu), + OpInfo('triu', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + error_inputs_func=error_inputs_tril_triu, + sample_inputs_func=sample_inputs_tril_triu), + OpInfo('triu_indices', + dtypes=_dispatch_dtypes((torch.int32, torch.int64)), + sample_inputs_func=sample_inputs_trilu_indices, + ref=lambda h, w, ofs=0, dtype=torch.long, device='cpu' : np.array(np.triu_indices(h, ofs, w), dtype=dtype), + supports_out=False, + supports_autograd=False, + skips=( + # skip these tests since we have non tensor input + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_noncontiguous_samples'), + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.skip('Skipped!'), 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip('Skipped!'), 'TestMathBits', 'test_neg_view'), + )), + OpInfo('tril_indices', + dtypes=_dispatch_dtypes((torch.int32, torch.int64)), + sample_inputs_func=sample_inputs_trilu_indices, + ref=lambda h, w, ofs=0, dtype=torch.long, device='cpu' : np.array(np.tril_indices(h, ofs, w), dtype=dtype), + supports_out=False, + supports_autograd=False, + skips=( + # skip these tests since we have non tensor input + 
DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_noncontiguous_samples'), + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.skip('Skipped!'), 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip('Skipped!'), 'TestMathBits', 'test_neg_view'), + )), + OpInfo('kron', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_inplace_autograd=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_kron, + decorators=( + # RuntimeError: view size is not compatible with input tensor's size and stride + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides"), + )), + OpInfo('inner', + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16), + dtypesIfROCM=floating_and_complex_types_and(torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_inner, + ), + OpInfo('tensordot', + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16), + dtypesIfROCM=floating_and_complex_types_and(torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_tensordot, + skips=( + # Skip operator schema test because this is a functional and not an operator. 
+ # Reference: https://github.com/pytorch/pytorch/issues/54574 + DecorateInfo(unittest.skip("Skipped!"), 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), + ) + ), + OpInfo('to_sparse', + op=lambda x, *args: x.to_sparse(*args), + sample_inputs_func=sample_inputs_to_sparse, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + backward_dtypes=floating_types(), + backward_dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_sparse_csr=True, + supports_sparse_csc=True, + check_batched_grad=False, + check_batched_gradgrad=False, + skips=( + # NotImplementedError: Could not run 'aten::normal_' with arguments from the 'SparseCPU' backend + DecorateInfo(unittest.skip(""), 'TestCommon', 'test_noncontiguous_samples'), + # TODO: FIXME: complex inputs requiring grad error in forward + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes'), + # lambda impl + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # Allowed exception: sparse tensors don't have strides + DecorateInfo(unittest.skip("Allowed exception"), 'TestCompositeCompliance', 'test_operator'), + DecorateInfo(unittest.skip("Allowed exception"), 'TestCompositeCompliance', 'test_backward'), + DecorateInfo(unittest.skip("Allowed exception"), 'TestTags', 'test_tags'), + # TODO: implement csr.to_sparse(sample_dim) where sampled_dim is 1. + DecorateInfo(unittest.skip("csr.to_sparse(1) not implemented. Skipped!"), + 'TestSparseCSR', 'test_sparse_csr_consistency'), + # Compiler issue on ROCm. 
Might need to skip until ROCm5.5 + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_non_standard_bool_values', + dtypes=[torch.bool], active_if=TEST_WITH_ROCM), + ) + ), + OpInfo('logcumsumexp', + dtypes=floating_and_complex_types_and(torch.bfloat16, torch.half), + backward_dtypes=floating_and_complex_types_and(torch.bfloat16), + backward_dtypesIfCUDA=floating_and_complex_types_and(torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # AssertionError: UserWarning not triggered : Resized a non-empty tensor but did not warn about it. + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning', device_type='cuda'), + # RuntimeError: "max_values_cpu" not implemented for 'ComplexDouble' + # Falling back to non-numerically stablized exp, causing nan in the results. + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_forward_mode_AD', dtypes=[torch.complex128]), + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_fn_fwgrad_bwgrad', dtypes=[torch.complex128]), + DecorateInfo( + toleranceOverride({ + torch.float16: tol(atol=7e-5, rtol=6e-3), + }), + "TestInductorOpInfo", + "test_comprehensive", + device_type="cuda" + ), + ), + sample_inputs_func=sample_inputs_logcumsumexp, + error_inputs_func=error_inputs_logcumsumexp), + UnaryUfuncInfo('sigmoid', + aliases=('special.expit', 'nn.functional.sigmoid'), + aten_backward_name='sigmoid_backward', + ref=reference_sigmoid if TEST_SCIPY else None, + decorators=(precisionOverride({torch.float16: 1e-2, + torch.complex64: 1e-1, + torch.bfloat16: 1e-2}),), + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/56012 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + dtypes=[torch.complex64, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + dtypes=[torch.chalf, torch.complex64, torch.cdouble])), + 
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.complex32, torch.bool, torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + assert_autodiffed=True, + # sigmoid(z) = 1 / (1 + exp(-z)), at z = j * pi * odd_number, the denominator is zero + reference_numerics_filter=NumericsFilter( + condition=lambda x: (close_to_int(x / (math.pi * 1j)) + if x.is_complex() else x.new_tensor(False, dtype=torch.bool)), + safe_val=0)), + UnaryUfuncInfo('digamma', + ref=scipy.special.digamma if TEST_SCIPY else None, + aliases=('special.psi', 'special.digamma',), + decorators=(precisionOverride({torch.float16: 5e-1}),), + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True), + UnaryUfuncInfo('erf', + ref=scipy.special.erf if TEST_SCIPY else None, + aliases=('special.erf', ), + decorators=(precisionOverride({torch.float16: 1e-2, + torch.bfloat16: 1e-2}),), + skips=( + DecorateInfo(unittest.skip("Skipped! 
sparse backward not supported"), + 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), + + ), + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + assert_autodiffed=True, + assert_jit_shape_analysis=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True), + UnaryUfuncInfo('erfc', + ref=scipy.special.erfc if TEST_SCIPY else None, + aliases=('special.erfc', ), + decorators=(precisionOverride({torch.float16: 1e-2, + torch.bfloat16: 1e-2}),), + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True), + UnaryUfuncInfo('erfinv', + ref=scipy.special.erfinv if TEST_SCIPY else None, + aliases=('special.erfinv', ), + decorators=(precisionOverride({torch.float16: 1e-2, + torch.bfloat16: 1e-2, + torch.float32: 1e-4}),), + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16), + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + domain=(-1, 1), + skips=( + # Reference: https://github.com/pytorch/pytorch/pull/49155#issuecomment-742664611 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + active_if=TEST_SCIPY and version.parse(scipy.__version__) < version.parse("1.4.0")), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + active_if=TEST_SCIPY and version.parse(scipy.__version__) < version.parse("1.4.0")), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', + active_if=TEST_SCIPY and version.parse(scipy.__version__) < 
version.parse("1.4.0")), + )), + OpInfo("nn.functional.smooth_l1_loss", + ref=reference_smooth_l1_loss, + sample_inputs_func=sample_inputs_smooth_l1_loss, + dtypes=floating_types_and(torch.float16, torch.bfloat16), + backward_dtypes=floating_types_and(torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + backward_dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # RuntimeError: input->type()->kind() == TypeKind::OptionalTypeINTERNAL ASSERT FAILED + # at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270, please report a bug to PyTorch. + DecorateInfo(unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"),)), + OpInfo( + "nn.functional.l1_loss", + ref=loss_reference_reduction_wrapper(lambda input, target: np.abs(input - target)), + sample_inputs_func=sample_inputs_l1_loss, + error_inputs_func=error_inputs_l1_loss, + dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # RuntimeError: input->type()->kind() == TypeKind::OptionalTypeINTERNAL ASSERT FAILED + # at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270, please report a bug to PyTorch. 
+ DecorateInfo( + unittest.expectedFailure, + "TestJit", + "test_variant_consistency_jit", + dtypes=(torch.float32,), + ), + ), + ), + UnaryUfuncInfo('lgamma', + ref=reference_lgamma if TEST_SCIPY else None, + aliases=('special.gammaln', ), + decorators=(precisionOverride({torch.float16: 7e-1}),), + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + skips=( + # Reference: https://github.com/pytorch/pytorch/pull/50140#issuecomment-756150214 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + dtypes=[torch.float32, torch.float64], active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + dtypes=[torch.float32, torch.float64], active_if=IS_WINDOWS), + ), + # lgamma have multiple singularities at x <= 0 + reference_numerics_filter=NumericsFilter(condition=lambda x: x < 0.1, safe_val=1)), + OpInfo( + 'logdet', + dtypes=floating_and_complex_types(), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_linalg_det_logdet_slogdet, + decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack]), + # `log_softmax` supports different dtypes based on whether `dtype` argument, + # is passed or not. Hence two OpInfo entries, one with dtype and other without. 
+ OpInfo( + 'log_softmax', + aliases=('special.log_softmax', 'nn.functional.log_softmax'), + supports_out=True, + aten_backward_name='_log_softmax_backward_data', + dtypes=floating_types_and(torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_softmax_variant, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=True), + OpInfo( + 'log_softmax', + variant_test_name='with_dtype', + aliases=('special.log_softmax', 'nn.functional.log_softmax'), + supports_out=True, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + sample_inputs_func=partial(sample_inputs_softmax_variant, with_dtype=True), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=True), + UnaryUfuncInfo('logit', + aten_backward_name='logit_backward', + ref=scipy.special.logit if TEST_SCIPY else None, + domain=(0, 1), + aliases=('special.logit', ), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + decorators=(precisionOverride({torch.bfloat16: 5e-1, + torch.float16: 5e-1}),), + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_logit), + OpInfo('where', + # Currently only the `input` is tested in gradcheck. + # If we pass `condition` first, none of the input which supports + # autograd will be tested. Hence the following lambda. 
+ op=lambda self, condition, other, **kwargs: torch.where(condition, self, other, **kwargs), + ref=lambda self, condition, other: np.where(condition, self, other), + sample_inputs_func=sample_inputs_where, + reference_inputs_func=reference_inputs_where, + error_inputs_func=error_inputs_where, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=( + DecorateInfo(onlyCUDA, "TestCommon", 'test_errors'),), + skips=( + # lambda impl + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + ), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf)), + OpInfo('nonzero', + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), + sample_inputs_func=sample_inputs_nonzero, + supports_autograd=False, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # nonzero(): argument 'out' must be Tensor, not tuple + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + # https://github.com/pytorch/pytorch/issues/67458 + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # nonzero is not raising a warning when the out is resized + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + # Can't find schemas for this operator for some reason + DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), + # Compiler issue on ROCm. 
Might need to skip until ROCm5.5 + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_non_standard_bool_values', + dtypes=[torch.bool], active_if=TEST_WITH_ROCM), + )), + OpInfo('nonzero_static', + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), + sample_inputs_func=sample_inputs_nonzero_static, + supports_out=False, + supports_autograd=False, + decorators=[onlyCPU], + skips=( + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + DecorateInfo(unittest.expectedFailure, 'TestDTensorOps', 'test_dtensor_op_db'), + DecorateInfo(unittest.expectedFailure, 'TestInductorOpInfo', 'test_comprehensive'), + DecorateInfo(unittest.expectedFailure, 'TestVmapOperatorsOpInfo', 'test_op_has_batch_rule'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_non_standard_bool_values', + dtypes=[torch.bool], active_if=TEST_WITH_ROCM), + )), + # Following tests are for jiterator's python interface + # Jiterator can be used to author elementwise CUDA kernel + # jiterator._create_jit_fn returns a callable that behaves like a regular pytorch op + # See create_jit_fn in jiterator.py for more information + UnaryUfuncInfo( + 'jiterator_unary', + op=torch.cuda.jiterator._create_jit_fn("template T unary(T x) { return x * x + x; }"), + ref=lambda x: x * x + x, + dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16, torch.bool), + supports_out=False, + supports_autograd=False, # jiterator ops doesn't have backward defined + decorators=[ + onlyCUDA, + DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}), + 'TestUnaryUfuncs', 'test_reference_numerics_extremal'), + DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}), + 'TestUnaryUfuncs', 'test_reference_numerics_hard'), + DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}), + 'TestUnaryUfuncs', 
'test_reference_numerics_normal'), + DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}), + 'TestUnaryUfuncs', 'test_reference_numerics_small'), + ], + skips=( + # Jiterator ops doesn't support neg or conj view + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + # Jiterator ops doesn't support CompositeCompliantTensor + # Following test should expectedFailure, but it's causing cascading failures in CUDA, thus skipped + DecorateInfo(unittest.skip("skip"), 'TestCompositeCompliance', 'test_operator'), + # Skip reference_numerics tests for bool type, as the defined function doesn't work for bool + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + dtypes=[torch.bool]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard', + dtypes=[torch.bool]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal', + dtypes=[torch.bool]), + # ROCm generates -inf+infj instead of nan+infj for complex64 for some of the results + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + dtypes=[torch.complex64], active_if=TEST_WITH_ROCM), + # Expected failure: torch.jiterator_unary is not a valid op + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # Skip Nvfuser + DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo'), + ) + ), + BinaryUfuncInfo( + 'jiterator_binary', + op=torch.cuda.jiterator._create_jit_fn( + "template T binary(T x, T y, T alpha) { return x + alpha * y; }", alpha=1), + ref=lambda input, other, *, alpha=1: np.add(input, other) if alpha == 1 \ + else np.add(input, np.multiply(alpha, other)), + dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16, 
torch.bool), + sample_inputs_func=partial(sample_inputs_jiterator, num_inputs=2, alpha=-3.14), + supports_out=False, + supports_autograd=False, # jiterator ops doesn't have backward defined + supports_rhs_python_scalar=False, + decorators=[onlyCUDA], + skips=( + # Jiterator ops doesn't support neg or conj view + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + # Jiterator ops doesn't support CompositeCompliantTensor + # Following test should expectedFailure, but it's causing cascading failures in CUDA, thus skipped + DecorateInfo(unittest.skip("skip"), 'TestCompositeCompliance', 'test_operator'), + # Expected failure: torch.jiterator_binary is not a valid op + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # Skip Nvfuser + DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo'), + ) + ), + OpInfo( + 'jiterator_4inputs_with_extra_args', + op=torch.cuda.jiterator._create_jit_fn( + "template T binary(T i0, T i1, T i2, T i3, T alpha, T beta) { return alpha * i0 + beta * i1 + i2 + i3; }", + alpha=1, beta=1), + ref=lambda i0, i1, i2, i3, *, alpha=1, beta=1: alpha * i0 + beta * i1 + i2 + i3, + dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16, torch.bool), + sample_inputs_func=partial(sample_inputs_jiterator, num_inputs=4, alpha=3.14, beta=-4.20), + supports_out=False, + supports_autograd=False, # jiterator ops doesn't have backward defined + decorators=[onlyCUDA], + skips=( + # Jiterator ops doesn't support neg or conj view + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + # Jiterator ops doesn't support CompositeCompliantTensor + # Following test should 
expectedFailure, but it's causing cascading failures in CUDA, thus skipped + DecorateInfo(unittest.skip("skip"), 'TestCompositeCompliance', 'test_operator'), + # Expected failure: torch.jiterator_4inputs_with_extra_args is not a valid op + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # Skip Nvfuser + DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo'), + ) + ), + BinaryUfuncInfo( + 'jiterator_binary_return_by_ref', + op=torch.cuda.jiterator._create_multi_output_jit_fn( + """ + template + void binary_return_by_ref(T i0, T i1, T& out0) { + out0 = i0 + i1; + } + """, + num_outputs=1), + ref=operator.add, + dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16, torch.bool), + sample_inputs_func=partial(sample_inputs_jiterator, num_inputs=2, alpha=-0.42), + supports_out=False, + supports_autograd=False, # jiterator ops doesn't have backward defined + supports_rhs_python_scalar=False, + decorators=[onlyCUDA], + skips=( + # Jiterator ops doesn't support neg or conj view + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + # Jiterator ops doesn't support CompositeCompliantTensor + # Following test should expectedFailure, but it's causing cascading failures in CUDA, thus skipped + DecorateInfo(unittest.skip("skip"), 'TestCompositeCompliance', 'test_operator'), + # Expected failure: torch.jiterator_4inputs_with_extra_args is not a valid op + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # Skip Nvfuser + DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo'), + ) + ), + OpInfo( + 'jiterator_2inputs_2outputs', + op=torch.cuda.jiterator._create_multi_output_jit_fn( + """ + template + void binary_2outputs(T i0, T i1, T& out0, T& out1) { + out0 = i0 + i1; + out1 = i0 - i1; + } + """, + 
num_outputs=2), + ref=lambda i0, i1, *, alpha=1: (i0 + i1, i0 - i1), + dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16, torch.bool), + sample_inputs_func=partial(sample_inputs_jiterator, num_inputs=2), + supports_out=False, + supports_autograd=False, # jiterator ops doesn't have backward defined + decorators=[onlyCUDA], + skips=( + # Jiterator ops doesn't support neg or conj view + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + # Jiterator ops doesn't support CompositeCompliantTensor + # Following test should expectedFailure, but it's causing cascading failures in CUDA, thus skipped + DecorateInfo(unittest.skip("skip"), 'TestCompositeCompliance', 'test_operator'), + # Expected failure: torch.jiterator_4inputs_with_extra_args is not a valid op + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # Skip Nvfuser + DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo'), + ) + ), + # `torch.norm` has multiple code paths depending on the value of `p`. + # These paths have different dtype support. Also JIT supports, + # most variants but not all of them. So we split the OpInfo entries, + # for `norm` based on the code-paths and JIT support. + OpInfo( + "norm", + sample_inputs_func=sample_inputs_norm, + dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), + # TODO Benchmark again with the new implementation + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + check_batched_forward_grad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # Dispatches in Python to vector_norm. Not sure how to make this test happy + # Happens to pass on complex64. 
Also a mystery + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', + dtypes=(torch.float32,)),) + ), + OpInfo('norm', + variant_test_name='nuc', + sample_inputs_func=sample_inputs_norm_nuc, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], + check_batched_gradgrad=False, + # torch.autograd.gradcheck.GradcheckError: While computing batched gradients + # got: Could not allocate memory to change Tensor SizesAndStrides! + check_batched_forward_grad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + dtypes=floating_and_complex_types(), + dtypesIfCUDA=floating_and_complex_types(), + skips=( + # Dispatches in Python to matrix_norm. Not sure how to make this test happy + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', + dtypes=(torch.complex64, torch.float32,)),) + ), + OpInfo('norm', + variant_test_name='fro', + sample_inputs_func=sample_inputs_norm_fro, + dtypes=floating_and_complex_types_and(torch.bfloat16, torch.float16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16), + supports_forward_ad=True, + # torch.autograd.gradcheck.GradcheckError: While computing batched gradients + # got: Could not allocate memory to change Tensor SizesAndStrides! + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + skips=( + # MPS has some mild accuracy issues for float16. We divide the tolerances by 10 + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-4, rtol=0.01)}), + 'TestConsistency', + 'test_output_match', + + ), + # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 + DecorateInfo( + unittest.skip("Skipped!"), + 'TestSchemaCheckModeOpInfo', + 'test_schema_correctness', + dtypes=(torch.complex64, torch.complex128)), + # Dispatches in Python to vector_norm. 
Not sure how to make this test happy + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', + dtypes=(torch.complex64, torch.float32,)),) + ), + OpInfo( + "norm", + variant_test_name="inf", + sample_inputs_func=sample_inputs_norm_inf, + dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), + supports_forward_ad=True, + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + # fast gradcheck produces NaNs + gradcheck_fast_mode=False, + skips=( + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=2e-3, rtol=1e-3)}), + 'TestInductorOpInfo', 'test_comprehensive', device_type='cuda', + ), + # Dispatches in Python to vector_norm. Not sure how to make this test happy + # Happens to pass on complex64. Also a mystery + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', + dtypes=(torch.float32,)) + ), + ), + OpInfo('t', + sample_inputs_func=sample_inputs_t, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + # vmap does not support inplace views + check_inplace_batched_forward_grad=False, + autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused + autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + assert_autodiffed=True, + error_inputs_func=error_inputs_t), + OpInfo('t_copy', + sample_inputs_func=sample_inputs_t, + supports_out=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + # vmap does not support inplace views + check_inplace_batched_forward_grad=False, + autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused + autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused + dtypes=all_types_and_complex_and(torch.bool, torch.float16, 
torch.bfloat16), + assert_autodiffed=True, + error_inputs_func=error_inputs_t), + OpInfo( + "nn.functional.dropout", + op=lambda input, *args, **kwargs: + wrapper_set_seed(torch.nn.functional.dropout, input, *args, **kwargs), + dtypes=floating_types_and(torch.float16, torch.bfloat16), + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # Probably because we have used lambda for the op here + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # inplace variant dispatches to dropout kernel, while on CUDA + # the op dispatches to _fused_dropout (with a few more conditions) + # hence, different values and this skip here + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view', device_type='cuda'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu')), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + supports_out=False, + sample_inputs_func=sample_inputs_dropout, + inplace_variant=lambda input, *args, **kwargs: + wrapper_set_seed(torch.nn.functional.dropout, input, *args, **kwargs, inplace=True)), + OpInfo( + "native_dropout_backward", + op=torch.ops.aten.native_dropout_backward.default, + aten_name="native_dropout_backward", + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + sample_inputs_func=sample_inputs_dropout_backward, + skips=( + DecorateInfo(unittest.skip('Skipped!'), 'TestJit', 'test_variant_consistency_jit'), + # Lazy tensor failures + DecorateInfo(unittest.skip('Skipped!'), 'TestLazyOpInfo', 'test_dispatched_to_lazy'), + # These tests fail only when built with ASAN + DecorateInfo(unittest.skip("Fails with ASAN"), 'TestLazyOpInfo', 
'test_correctness', active_if=TEST_WITH_ASAN), + DecorateInfo( + unittest.skip("Fails with ASAN"), + 'TestLazyOpInfo', + 'test_correctness_with_reusing_ir', + active_if=TEST_WITH_ASAN + ), + ), + ), + OpInfo( + "nn.functional.dropout2d", + op=lambda input, *args, **kwargs: + wrapper_set_seed(torch.nn.functional.dropout2d, input, *args, **kwargs), + dtypes=floating_types_and(torch.float16, torch.bfloat16), + skips=( + # lambda impl + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu')), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + check_batched_forward_grad=False, + # As per the docs, valid input dims are (3, 4) + sample_inputs_func=partial(sample_inputs_dropout, valid_input_dim=(3, 4)), + inplace_variant=lambda input, *args, **kwargs: + wrapper_set_seed(torch.nn.functional.dropout2d, input, *args, **kwargs, inplace=True)), + OpInfo( + "nn.functional.dropout3d", + op=lambda input, *args, **kwargs: + wrapper_set_seed(torch.nn.functional.dropout3d, input, *args, **kwargs), + dtypes=floating_types_and(torch.float16, torch.bfloat16), + skips=( + # lambda impl + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu')), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + check_batched_forward_grad=False, + # As per the docs, valid input dims are (4, 5) + sample_inputs_func=partial(sample_inputs_dropout, valid_input_dim=(4, 5)), + inplace_variant=lambda input, *args, **kwargs: + wrapper_set_seed(torch.nn.functional.dropout3d, input, *args, **kwargs, 
inplace=True)), + OpInfo( + "nn.functional.alpha_dropout", + op=lambda input, *args, **kwargs: + wrapper_set_seed(torch.nn.functional.alpha_dropout, input, *args, **kwargs), + dtypes=floating_types_and(torch.float16, torch.bfloat16), + gradcheck_wrapper=wrapper_set_seed, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + sample_inputs_func=sample_inputs_dropout, + check_batched_forward_grad=False, + inplace_variant=lambda input, *args, **kwargs: + wrapper_set_seed(torch.nn.functional.alpha_dropout, input, *args, **kwargs, inplace=True), + skips=( + # lambda impl + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # AssertionError: Tensor-likes are not close! + # Fails in cuda11.7 + # Error Log: https://github.com/pytorch/pytorch/actions/runs/3440108478/jobs/5738475757 + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_compare_cpu', device_type='cuda'), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),),), + # In training mode, feature_alpha_dropout currently doesn't support inputs of complex dtype + # unlike when `train=False`, it supports complex inputs, hence 2 OpInfos to cover all cases + OpInfo( + "nn.functional.feature_alpha_dropout", + op=lambda input, *args, **kwargs: + wrapper_set_seed(torch.nn.functional.feature_alpha_dropout, input, *args, **kwargs), + variant_test_name="with_train", + dtypes=floating_types_and(torch.float16, torch.bfloat16), + skips=( + # lambda impl + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # torch.autograd.gradcheck.GradcheckError: While computing batched gradients, got: + # vmap: We do not yet support calling random operations inside of vmap. 
+ # Please perform random operations outside of vmap as a workaround + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', "test_forward_mode_AD"), + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', "test_inplace_forward_mode_AD"), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu')), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + # As per the docs, valid input dims are (4, 5) + sample_inputs_func=partial(sample_inputs_dropout, train=True, valid_input_dim=(4, 5)), + inplace_variant=lambda input, *args, **kwargs: + wrapper_set_seed(torch.nn.functional.feature_alpha_dropout, input, *args, **kwargs, inplace=True)), + OpInfo( + "nn.functional.feature_alpha_dropout", + op=lambda input, *args, **kwargs: + wrapper_set_seed(torch.nn.functional.feature_alpha_dropout, input, *args, **kwargs), + variant_test_name="without_train", + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + skips=( + # lambda impl + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),), + gradcheck_wrapper=wrapper_set_seed, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + sample_inputs_func=partial(sample_inputs_dropout, train=False), + inplace_variant=lambda input, *args, **kwargs: + wrapper_set_seed(torch.nn.functional.feature_alpha_dropout, input, *args, **kwargs, inplace=True)), + OpInfo( + "nn.functional.one_hot", + ref=reference_one_hot, + supports_out=False, + dtypes=_dispatch_dtypes((torch.int64,)), + sample_inputs_func=sample_inputs_one_hot, + ), + OpInfo( + "nn.functional.embedding", + aten_backward_name="embedding_dense_backward", + # We use lambda to reshuffle the positional arguments. 
+ # This is because currently only the `input` field of SampleInput + # is tested in gradient tests. + op=lambda weight, idx, **kwargs: torch.nn.functional.embedding(idx, weight, **kwargs), + dtypes=floating_types_and(torch.bfloat16, torch.float16), + sample_inputs_func=sample_inputs_embedding, + allow_cow_input_materialize_forward=[0], + error_inputs_func=error_inputs_embedding, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # lambda impl + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # Fails on CI https://github.com/pytorch/pytorch/issues/85377 + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_compare_cpu'), + # Reference: https://github.com/pytorch/pytorch/issues/67084 + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view', device_type='cuda'), + # Not a problem: embedding does weird stuff to its input (it renormalizes) + DecorateInfo(unittest.skip('Allowed exemption'), 'TestCompositeCompliance', 'test_operator'), + # Fails due to non-determinism (see issue #74679) + # TODO: Investigate why more granular skips in the test don't work in CI + DecorateInfo(unittest.skip('Skipped!'), + 'TestExpandedWeightFunctional', + 'test_expanded_weight_forward'), + ), + supports_expanded_weight=True, + supports_out=False, + ), + OpInfo( + "nn.functional.embedding_bag", + # We use lambda to reshuffle the positional arguments. + # This is because currently only the `input` field of SampleInput + # is tested in gradient tests. 
+ op=lambda weight, idx, **kwargs: torch.nn.functional.embedding_bag(idx, weight, **kwargs), + dtypes=floating_types_and(torch.bfloat16, torch.float16), + dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16), + # backward is not supported for mode `max` and dtype `bfloat16` + backward_dtypesIfCUDA=floating_types_and(torch.float16), + sample_inputs_func=sample_inputs_embedding_bag, + skips=( + # lambda impl + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # Not a problem: embedding_bag does weird stuff to its input (it renormalizes) + DecorateInfo(unittest.skip('Allowed exemption'), 'TestCompositeCompliance', 'test_operator'), + ), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + supports_out=False, + supports_gradgrad=False, + allow_cow_input_materialize_forward=[0], + ), + OpInfo( + "nn.functional.multi_head_attention_forward", + op=lambda input, *args, **kwargs: + wrapper_set_seed(torch.nn.functional.multi_head_attention_forward, input, *args, **kwargs), + dtypes=floating_types_and(torch.bfloat16, torch.float16), + sample_inputs_func=sample_inputs_multi_head_attention_forward, + skips=( + # Tensor-likes are not close + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples', dtypes=(torch.float32,)), + DecorateInfo(toleranceOverride({torch.float32: tol(atol=5e-3, rtol=0)}), 'TestDecomp', 'test_comprehensive'), + + # TODO skip this for now since we can't skip on runtime arch support (taken from scaled_dot_product_attention) + DecorateInfo(unittest.skip("Skipped!"), 'TestInductorOpInfo', 'test_comprehensive'), + # randomness + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_forward_mode_AD'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + # lambda impl + # AssertionError: JIT Test does not execute any logic + 
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # tests running very slowly break slow tests, so we skip them instead of using `slowTest`. + DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_forward_ad'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_operator'), + DecorateInfo( + unittest.skip("Skipped - baddbmm decomp does not have enough precision for 16-bit float"), + 'TestDecomp', + 'test_comprehensive', + dtypes=(torch.bfloat16, torch.float16), + ), + DecorateInfo( + unittest.skip("Skipped - baddbmm decomp does not have enough precision for 16-bit float"), + 'TestDecomp', + 'test_quick', + dtypes=(torch.bfloat16, torch.float16))), + supports_out=False, + supports_gradgrad=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + ), + UnaryUfuncInfo( + "nn.functional.softplus", + aten_backward_name='softplus_backward', + ref=reference_softplus, + sample_kwargs=lambda device, dtype, input: ({'beta': 3, 'threshold': .2}, {'beta': 3, 'threshold': .2}), + sample_inputs_func=partial(sample_inputs_elementwise_unary, op_kwargs={'beta': 3, 'threshold': .2}), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + dtypes=floating_types_and(torch.bfloat16, torch.float16), + decorators=( + DecorateInfo( + toleranceOverride + ({ + torch.half: tol(atol=1e-2, rtol=1e-2), + torch.bfloat16: tol(atol=1e-2, rtol=1e-2), + }), + 'TestUnaryUfuncs'), + ), + ), + OpInfo( + "nn.functional.mse_loss", + aten_backward_name='mse_loss_backward', + ref=loss_reference_reduction_wrapper(lambda input, target: (input - target) ** 2), + sample_inputs_func=sample_inputs_loss, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + 
dtypes=floating_types_and(torch.float16), + backward_dtypes=floating_types(), + dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16), + backward_dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16), + skips=( + # RuntimeError: input->type()->kind() == TypeKind::OptionalType + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":252, + # please report a bug to PyTorch. + DecorateInfo(unittest.expectedFailure, "TestJit", "test_variant_consistency_jit", dtypes=(torch.float32,),), + ), + ), + OpInfo( + "nn.functional.grid_sample", + dtypes=floating_types(), + dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + sample_inputs_func=sample_inputs_grid_sample, + reference_inputs_func=reference_inputs_grid_sample, + supports_gradgrad=False, + gradcheck_nondet_tol=1e-15), + # TODO: delete this OpInfo once we add meta support for grid_sampler_3d + OpInfo( + "grid_sampler_2d", + dtypes=floating_types(), + dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + sample_inputs_func=sample_inputs_grid_sampler_2d, + supports_gradgrad=False, + gradcheck_nondet_tol=1e-15, + skips=( + DecorateInfo(slowTest, 'TestDecomp', 'test_comprehensive', dtypes=(torch.float32, torch.float64), + active_if=IS_WINDOWS), + ),), + OpInfo( + "argwhere", + ref=np.argwhere, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + supports_out=False, + supports_autograd=False, + sample_inputs_func=sample_inputs_argwhere, + skips=( + # Compiler issue on ROCm. 
Might need to skip until ROCm5.5 + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_non_standard_bool_values', + dtypes=[torch.bool], active_if=TEST_WITH_ROCM), + ), + ), + ReductionOpInfo( + 'all', + identity=True, + supports_autograd=False, + result_dtype=torch.bool, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + ref=reference_reduction_numpy(np.all), + skips=( + # FIXME: uint8 input returns uint8 instead of bool + DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_result_dtype', dtypes=[torch.uint8]), + ), + ), + ReductionOpInfo( + 'any', + identity=False, + supports_autograd=False, + result_dtype=torch.bool, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + ref=reference_reduction_numpy(np.any), + skips=( + # FIXME: uint8 input returns uint8 instead of bool + DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_result_dtype', dtypes=[torch.uint8]), + ), + ), + ReductionOpInfo( + 'amax', + nan_policy='propagate', + supports_forward_ad=True, + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + ref=reference_reduction_numpy(np.amax), + skips=( + # FIXME: reduces all dimensions when dim=[] + DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty'), + DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'), + ), + error_inputs_func=error_inputs_aminmax_amax_amin, + ), + ReductionOpInfo( + 'amin', + nan_policy='propagate', + supports_forward_ad=True, + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + ref=reference_reduction_numpy(np.amin), + skips=( + # FIXME: reduces all dimensions when dim=[] + DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty'), + DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'), + ), + 
error_inputs_func=error_inputs_aminmax_amax_amin, + ), + ReductionOpInfo( + 'argmax', + supports_multiple_dims=False, + supports_autograd=False, + assert_jit_shape_analysis=True, + result_dtype=torch.int64, + dtypes=all_types_and(torch.float16, torch.bfloat16), + ref=reference_reduction_numpy(np.argmax, supports_keepdims=False), + ), + ReductionOpInfo( + 'argmin', + supports_multiple_dims=False, + supports_autograd=False, + result_dtype=torch.int64, + dtypes=all_types_and(torch.float16, torch.bfloat16), + ref=reference_reduction_numpy(np.argmin, supports_keepdims=False), + ), + ReductionOpInfo( + 'count_nonzero', + identity=0, + supports_out=False, + supports_autograd=False, + result_dtype=torch.int64, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_reduction_count_nonzero, + ref=reference_reduction_numpy(np.count_nonzero), + skips=( + # FIXME: count_nonzero does not accept keepdim kwarg + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none_keepdim'), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_single_keepdim'), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_multi_keepdim'), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_multi_unsorted_keepdim'), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_offbounds_keepdim'), + # FIXME: dim=[] reduces all dimensions + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'), + ), + ), + ReductionOpInfo( + 'mean', + nan_policy='propagate', + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # FIXME: mean needs 'dim' parameter when using the 'out' overload. 
+ # Adding it with 'generate_args_kwargs' does not work, since these also get passed + # onto the reference implementations. + supports_out=True, + assert_autodiffed=True, + assert_jit_shape_analysis=True, + promotes_int_to_float=True, + dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), + ref=reference_reduction_numpy(np.mean), + error_inputs_func=error_inputs_mean, + skips=( + # AssertionError: RuntimeError not raised : Expected RuntimeError when doing an unsafe cast from a result + # of dtype torch.float32 into an out= with dtype torch.long + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out', device_type='cuda', dtypes=[torch.float32]), + # FIXME: mean does not support passing keepdim without passing dim + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'), + # FIXME: mean reduces all dimensions when dim=[] + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'), + # FIXME: improve precision + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input', + dtypes=[torch.float16]), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_extremal_values', + device_type='cuda', dtypes=[torch.complex64]), + ), + ), + ReductionOpInfo( + 'nanmean', + nan_policy='omit', + assert_autodiffed=True, + promotes_int_to_float=True, + supports_forward_ad=True, + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + dtypes=floating_types_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16, torch.chalf), + sample_inputs_func=sample_inputs_nan_reduction(supports_multiple_dims=True), + ref=reference_reduction_numpy(np.nanmean), + skips=( + # AssertionError: False is not true : + # Failure in testing nodes' autodifferentiation. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # FIXME: prod reduces all dimensions when dim=[] + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'), + # FIXME: improve precision + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input', + dtypes=[torch.float16]), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_duplicate_values', + device_type='cuda', dtypes=[torch.float16]), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_extremal_values', + device_type='cuda', dtypes=[torch.complex64]), + ), + ), + ReductionOpInfo( + 'std', + nan_policy='propagate', + supports_out=True, + complex_to_real=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=True, + promotes_int_to_float=True, + check_batched_forward_grad=False, + dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_std_var, + ref=reference_std_var(np.std), + generate_args_kwargs=generate_std_var_kwargs, + skips=( + # FIXME: cannot specify keepdim without dim + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'), + # FIXME: dim=[] reduces all dimensions + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'), + # FIXME: improve precision + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input', + dtypes=(torch.float16,)), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_duplicate_values', + dtypes=(torch.float16,)), + ), + ), + ReductionOpInfo( + 'std', + variant_test_name='unbiased', + nan_policy='propagate', + supports_out=False, + complex_to_real=True, + 
supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=True, + promotes_int_to_float=True, + check_batched_forward_grad=False, + dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_std_var_unbiased, + skips=( + # FIXME: dim=[] reduces all dimensions + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'), + ), + ), + ReductionOpInfo( + 'var', + nan_policy='propagate', + supports_out=True, + assert_autodiffed=True, + promotes_int_to_float=True, + complex_to_real=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_forward_grad=False, + dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_std_var, + ref=reference_std_var(np.var), + generate_args_kwargs=generate_std_var_kwargs, + skips=( + # FIXME: cannot specify keepdim without dim + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'), + # FIXME: dim=[] reduces all dimensions + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'), + # FIXME: improve precision + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input'), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_duplicate_values'), + # NumPy is giving NaN for this + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_large_input'), + ), + ), + ReductionOpInfo( + 'var', + variant_test_name='unbiased', + nan_policy='propagate', + supports_out=False, + complex_to_real=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=True, + 
promotes_int_to_float=True, + check_batched_forward_grad=False, + dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_std_var_unbiased, + skips=( + # FIXME: dim=[] reduces all dimensions + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'), + ), + ), + ReductionOpInfo( + 'prod', + identity=1, + nan_policy='propagate', + supports_multiple_dims=False, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_int64=True, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), + dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + sample_inputs_func=sample_inputs_prod, + ref=prod_numpy, + skips=( + # FIXME: prod does not support passing keepdim without passing dim + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'), + # FIXME: prod reduces all dimensions when dim=[] + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'), + # FIXME: prod does not support passing None to dim + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none'), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none_keepdim'), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input', + dtypes=[torch.float16, torch.complex64]), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_duplicate_values', + dtypes=[torch.uint8, torch.float16, torch.complex64]), + # FIXME: ValueError: The data in MaskedTensor a and Tensor b 
do not match + DecorateInfo(unittest.skip("Skipped!"), 'TestOperators', 'test_reduction_all', + dtypes=[torch.float16]), + ), + ), + ReductionOpInfo( + 'sum', + identity=0, + nan_policy='propagate', + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_int64=True, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + ref=reference_reduction_numpy(np.sum), + error_inputs_sparse_func=error_inputs_sparse_reduction_sum, + sample_inputs_sparse_coo_func=partial(sample_inputs_sparse_reduction_sum, layout=torch.sparse_coo), + sample_inputs_sparse_csr_func=partial(sample_inputs_sparse_reduction_sum, layout=torch.sparse_csr), + sample_inputs_sparse_csc_func=partial(sample_inputs_sparse_reduction_sum, layout=torch.sparse_csc), + sample_inputs_sparse_bsr_func=partial(sample_inputs_sparse_reduction_sum, layout=torch.sparse_bsr), + sample_inputs_sparse_bsc_func=partial(sample_inputs_sparse_reduction_sum, layout=torch.sparse_bsc), + skips=( + # FIXME: sum does not support passing keepdim without passing dim + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'), + # FIXME: sum reduces all dimensions when dim=[] + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'), + # FIXME: improve precision + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input', + dtypes=[torch.float16]), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_duplicate_values', + dtypes=[torch.float16]), + DecorateInfo(unittest.skip("Skipped!"), 'TestOperators', 'test_reduction_all', + dtypes=[torch.float32]), + ), + ), + ReductionOpInfo( + 'nansum', + identity=0, + nan_policy='omit', + supports_out=True, + promotes_int_to_int64=True, + 
supports_forward_ad=True, + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + sample_inputs_func=sample_inputs_nan_reduction(supports_multiple_dims=True), + ref=reference_reduction_numpy(np.nansum), + skips=( + # please report a bug to PyTorch. + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # FIXME: nansum reduces all dimensions when dim=[] + DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty'), + DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'), + # FIXME: flaky test so skipped instead of xfailed + # possibly bad low precision reference in numpy + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input', + dtypes=[torch.float16]), + ), + ), + OpInfo( + "nn.functional.ctc_loss", + dtypes=floating_types(), + supports_out=False, + sample_inputs_func=sample_inputs_ctc_loss, + skips=( + # https://github.com/pytorch/pytorch/issues/67462 + # torch.autograd.gradcheck.GradcheckError: Jacobian mismatch for output 0 with respect to input 0 + DecorateInfo( + unittest.expectedFailure, + "TestBwdGradients", + "test_fn_grad", + dtypes=(torch.float64,), + ), + # RuntimeError: derivative for aten::_ctc_loss_backward is not implemented + DecorateInfo( + unittest.expectedFailure, + "TestBwdGradients", + "test_fn_gradgrad", + dtypes=(torch.float64,), + ), + # RuntimeError: derivative for aten::_ctc_loss_backward is not implemented + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + dtypes=(torch.float32,), + ), + # Ref: https://github.com/pytorch/pytorch/issues/85231 + DecorateInfo(unittest.skip("Fails with ASAN"), + 'TestProxyTensorOpInfo', + 'test_make_fx_fake_exhaustive', active_if=TEST_WITH_ASAN), + ), + ), + OpInfo( + 
"nn.functional.cosine_embedding_loss", + dtypes=all_types_and(torch.half, torch.bfloat16, torch.bool), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=[ + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-4, rtol=2e-3)}), + 'TestInductorOpInfo', 'test_comprehensive', device_type="cuda", + ), + ], + sample_inputs_func=sample_inputs_cosine_embedding_loss, + ), + OpInfo( + "nn.functional.nll_loss", + dtypes=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + sample_inputs_func=sample_inputs_nll_loss, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_jit_shape_analysis=True, + skips=( + # RuntimeError: + # undefined value tensor: + # File "", line 3 + # def the_method(i0, i1): + # return torch.nn.functional.nll_loss(i0, i1, weight=tensor([8.4784, 1.7658, 4.3228], dtype=torch.float32)) + # ~~~~~~ <--- HERE + DecorateInfo(unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit", dtypes=(torch.float32,),), + # Fails for unknown reason: https://github.com/pytorch/pytorch/issues/120782 + DecorateInfo( + unittest.skip("Skipped!"), + "TestCompositeCompliance", + "test_cow_input", + device_type='cuda', + ), + DecorateInfo(unittest.skip("FP16 nll_loss cases have not been enabled on MPS yet"), + dtypes=(torch.half,), device_type="mps"), + + ), + ), + OpInfo( + "nn.functional.gaussian_nll_loss", + dtypes=floating_types_and(torch.half, torch.bfloat16), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_gaussian_nll_loss, + error_inputs_func=error_inputs_gaussian_nll_loss, + skips=( + # Pre-existing condition (calls .item); needs to be fixed + DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward'), + DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 
'test_forward_ad'), + # Pre-existing condition (calls .item); needs to be fixed + DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_operator'), + # JIT does not support variadic tensors. + # RuntimeError: input->type()->kind() == TypeKind::OptionalType + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270, + # please report a bug to PyTorch. + DecorateInfo(unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit", dtypes=(torch.float32,),), + ), + ), + OpInfo( + "nn.functional.hinge_embedding_loss", + dtypes=floating_types_and(torch.half, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_hinge_embedding_loss, + error_inputs_func=error_inputs_hinge_embedding_loss, + reference_inputs_func=reference_inputs_hinge_embedding_loss, + ), + OpInfo( + "nn.functional.huber_loss", + aten_backward_name='huber_loss_backward', + dtypes=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + sample_inputs_func=sample_inputs_huber_loss, + error_inputs_func=error_inputs_huber_loss, + skips=( + # JIT does not support variadic tensors. + # RuntimeError: input->type()->kind() == TypeKind::OptionalType + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270, + # please report a bug to PyTorch. 
+ DecorateInfo(unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit", dtypes=(torch.float32,),), + ) + ), + OpInfo( + "nn.functional.pdist", + ref=reference_pdist, + sample_inputs_func=sample_inputs_pdist, + dtypes=floating_types(), + supports_out=False, + supports_gradgrad=False, + skips=( + DecorateInfo(unittest.skip("Unsupported on MPS for now"), 'TestCommon', 'test_numpy_ref_mps'), + ) + ), + OpInfo( + "nn.functional.poisson_nll_loss", + dtypes=all_types_and(torch.half, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_poisson_nll_loss, + error_inputs_func=error_inputs_poisson_nll_loss, + ), + OpInfo( + "argsort", + dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_sort, + supports_out=False, + supports_autograd=False, + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + dtypes=(torch.float32,), + ), + ), + ), + OpInfo( + "repeat_interleave", + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + backward_dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16, torch.chalf), + sample_inputs_func=sample_inputs_repeat_interleave, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + dtypes=(torch.float32, torch.complex64), + ), + ), + ), + OpInfo( + "nn.functional.pairwise_distance", + ref=lambda a, b, p=2.0, eps=1e-6, keepdim=False: ( + np.sum(np.abs(a - b + eps) ** p, axis=-1, keepdims=keepdim) ** (1 / p) + ), + sample_inputs_func=sample_inputs_pairwise_distance, + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + 
supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + dtypes=(torch.float32, torch.complex64), + ), + ), + ), + OpInfo( + "nn.functional.pixel_shuffle", + sample_inputs_func=sample_inputs_pixel_shuffle, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + dtypes=(torch.float32, torch.complex64), + ), + ), + ), + OpInfo( + "nn.functional.pixel_unshuffle", + sample_inputs_func=sample_inputs_pixel_unshuffle, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + dtypes=(torch.float32, torch.complex64), + ), + ), + ), + OpInfo( + "nn.functional.channel_shuffle", + sample_inputs_func=sample_inputs_channel_shuffle, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + allow_cow_input_materialize_forward=[0], + allow_cow_input_materialize_backward=[0, 'output grad 0'], + skips=( + # Skip due to NotImplementedError for MPS device. 
+ DecorateInfo(unittest.expectedFailure, 'TestConsistency'), + DecorateInfo(unittest.expectedFailure, 'TestDTensorOps', 'test_dtensor_op_db'), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides"), + ), + ), + OpInfo( + "nn.functional.kl_div", + sample_inputs_func=sample_inputs_kl_div, + dtypes=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + ), + OpInfo( + "diagflat", + ref=lambda input, offset=0: np.diagflat(input, k=offset), + sample_inputs_func=sample_inputs_diagflat, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), + dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + ), + OpInfo( + 'scatter_reduce', + variant_test_name='sum', + # complex not added to dtypes as complex gradients are not properly handled + # and scatter_reduce hasn't been added to the whitelist in gen_variable_type yet + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_scatter_reduce, + ), + OpInfo( + 'scatter_reduce', + variant_test_name='prod', + # complex not added to dtypes as complex gradients are not properly handled + # and scatter_reduce hasn't been added to the whitelist in gen_variable_type yet + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_scatter_reduce, + skips=( + # Not implemented + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_forward_mode_AD'), + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_inplace_forward_mode_AD'), + 
DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_fn_fwgrad_bwgrad'), + ), + ), + OpInfo( + 'scatter_reduce', + variant_test_name='mean', + # complex not added to dtypes as complex gradients are not properly handled + # and scatter_reduce hasn't been added to the whitelist in gen_variable_type yet + dtypes=all_types_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_scatter_reduce, + ), + OpInfo( + 'scatter_reduce', + variant_test_name='amin', + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16), + supports_forward_ad=True, + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_scatter_reduce, + ), + OpInfo( + 'scatter_reduce', + variant_test_name='amax', + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16), + supports_forward_ad=True, + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_scatter_reduce, + ), + OpInfo( + '_segment_reduce', + aten_name='segment_reduce', + variant_test_name='lengths', + dtypes=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + # RuntimeError: derivative for aten::_segment_reduce_backward is not implemented + supports_gradgrad=False, + sample_inputs_func=sample_inputs_segment_reduce, + skips=( + # FIXME: CUDA driver API confirmed a leak in + # __main__.TestJitCUDA.test_variant_consistency_jit_segment_reduce_cuda_float32 + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + device_type="cuda", + ), + ), + ), + OpInfo( + '_segment_reduce', + aten_name='segment_reduce', + variant_test_name='offsets', + dtypes=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + # 
RuntimeError: derivative for aten::_segment_reduce_backward is not implemented + supports_gradgrad=False, + sample_inputs_func=partial(sample_inputs_segment_reduce, mode='offsets'), + skips=( + # FIXME: CUDA driver API confirmed a leak in + # __main__.TestJitCUDA.test_variant_consistency_jit_segment_reduce_cuda_float32 + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + device_type="cuda", + ), + ), + ), +] +op_db += opinfo.definitions.op_db + + +# Separate registry for experimental Python Reference OpInfos. +python_ref_db = [ + # + # Elementwise Unary OpInfos + # + ElementwiseUnaryPythonRefInfo( + "_refs.abs", + torch_opinfo_name="abs", + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/49224 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_small', + dtypes=[torch.int8], active_if=TEST_WITH_ASAN), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.acos", + torch_opinfo_name="acos", + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_normal', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + # Failing with wrong imaginary sign on at least some Windows jobs + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_small', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + # Failing with wrong imaginary sign on at least some Windows jobs + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + 
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + ) + ), + ElementwiseUnaryPythonRefInfo( + "_refs.acosh", + torch_opinfo_name="acosh", + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_normal', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + # Failing with wrong imaginary sign on at least some Windows jobs + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_small', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.asin", + torch_opinfo_name="asin", + decorators=[ + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-05, rtol=1e-03)}), + 'TestUnaryUfuncs', device_type='cuda'), + precisionOverride({torch.bfloat16: 1e-2}), + ], + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 
'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.asinh", + torch_opinfo_name="asinh", + decorators=(precisionOverride({torch.bfloat16: 5e-2}),), + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_small', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_normal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + ), + ), + PythonRefInfo( + "_refs.lerp", + torch_opinfo_name="lerp", + ), + PythonRefInfo( + "_refs.ones", + torch_opinfo_name="ones", + skips=( + # Tests that assume input is a tensor or sequence of tensors + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + ), 
+ ), + PythonRefInfo( + "_refs.zeros", + torch_opinfo_name="zeros", + skips=( + # Tests that assume input is a tensor or sequence of tensors + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + ), + ), + PythonRefInfo( + "_refs.cauchy", + torch_opinfo_name="cauchy", + decorators=( + # TODO: RuntimeError: no _refs support for torch.rand_like + DecorateInfo(unittest.skip("TODO: RuntimeError: no _refs support for torch.rand_like"), + 'TestCommon', + 'test_python_ref'), + # AssertionError: Tensor-likes are not close! + DecorateInfo(unittest.skip("Expected: cauchy is not comparable"), + 'TestCommon', + 'test_out'), + DecorateInfo(unittest.skip("Expected: cauchy is not comparable"), + 'TestCommon', + 'test_out_warning'), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor'), + DecorateInfo(unittest.skip("Expected: cauchy is not comparable"), + 'TestCommon', + 'test_python_ref_torch_fallback'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + ) + ), + PythonRefInfo( + "_refs.exponential", + torch_opinfo_name="exponential", + supports_out=True, + decorators=( + # dtypes that do not support check_uniform_bounds of rand_like + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_meta', + dtypes=(torch.int8, torch.uint8, torch.int16, torch.int32, torch.int64)), + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_dtypes'), + + # TODO: RuntimeError: no _refs support for torch.rand_like + DecorateInfo(unittest.skip("TODO: RuntimeError: no _refs support for torch.rand_like"), + 'TestCommon', + 'test_python_ref'), + + # AssertionError: Tensor-likes are not close! 
+ DecorateInfo(unittest.skip("Expected: exponential is not comparable"), + 'TestCommon', + 'test_out'), + DecorateInfo(unittest.skip("Expected: exponential is not comparable"), + 'TestCommon', + 'test_out_warning'), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor'), + DecorateInfo(unittest.skip("Expected: exponential is not comparable"), + 'TestCommon', + 'test_python_ref_torch_fallback'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + ) + ), + PythonRefInfo( + "_refs.geometric", + torch_opinfo_name="geometric", + supports_out=True, + decorators=( + # dtypes that do not support check_uniform_bounds of rand_like + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_dtypes'), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_meta', + dtypes=(torch.int8, torch.uint8, torch.int16, torch.int32, torch.int64)), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback', + dtypes=(torch.int8, torch.uint8, torch.int16, torch.int32, torch.int64)), + + # TODO: RuntimeError: no _refs support for torch.rand_like + DecorateInfo(unittest.skip("TODO: RuntimeError: no _refs support for torch.rand_like"), + 'TestCommon', + 'test_python_ref'), + DecorateInfo(unittest.skip("Expected: geometric is not comparable"), + 'TestCommon', + 'test_python_ref_executor', device_type='cuda'), + + # AssertionError: Tensor-likes are not close! 
+ DecorateInfo(unittest.skip("Expected: geometric is not comparable"), + 'TestCommon', + 'test_out'), + DecorateInfo(unittest.skip("Expected: geometric is not comparable"), + 'TestCommon', + 'test_out_warning'), + DecorateInfo(unittest.skip("Expected: geometric is not comparable"), + 'TestCommon', + 'test_python_ref_torch_fallback'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + ) + ), + PythonRefInfo( + "_refs.log_normal", + torch_opinfo_name="log_normal", + supports_out=True, + decorators=( + # TODO: RuntimeError: no _refs support for torch.rand_like + DecorateInfo(unittest.skip("TODO: RuntimeError: no _refs support for torch.rand_like"), + 'TestCommon', + 'test_python_ref'), + DecorateInfo(unittest.skip("Expected: log_normal is not comparable"), + 'TestCommon', + 'test_python_ref_executor', device_type='cuda'), + + # AssertionError: Tensor-likes are not close! + DecorateInfo(unittest.skip("Expected: log_normal is not comparable"), + 'TestCommon', + 'test_out'), + DecorateInfo(unittest.skip("Expected: log_normal is not comparable"), + 'TestCommon', + 'test_out_warning'), + DecorateInfo(unittest.skip("Expected: log_normal is not comparable"), + 'TestCommon', + 'test_python_ref_torch_fallback'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + ) + ), + PythonRefInfo( + "_refs.normal", + torch_opinfo_name="normal", + supports_out=True, + decorators=( + # TODO: RuntimeError: no _refs support for torch.rand_like + DecorateInfo(unittest.skip("TODO: RuntimeError: no _refs support for torch.rand_like"), + 'TestCommon', + 'test_python_ref'), + + # AssertionError: Tensor-likes are not close! 
+ DecorateInfo(unittest.skip("Expected: normal is not comparable"), + 'TestCommon', + 'test_out'), + DecorateInfo(unittest.skip("Expected: normal is not comparable"), + 'TestCommon', + 'test_out_warning'), + DecorateInfo(unittest.skip("Expected: normal is not comparable"), + 'TestCommon', + 'test_python_ref_torch_fallback'), + DecorateInfo(unittest.skip("Expected: normal is not comparable"), 'TestDecomp', 'test_comprehensive'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + DecorateInfo(unittest.skip("make_traced() doesn't set seed properly!"), 'TestCommon', 'test_python_ref_executor'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + ) + ), + PythonRefInfo( + "_refs.normal", + torch_opinfo_name="normal", + torch_opinfo_variant_name="number_mean", + supports_out=True, + decorators=( + # TODO: RuntimeError: no _refs support for torch.rand_like + DecorateInfo(unittest.skip("TODO: RuntimeError: no _refs support for torch.rand_like"), + 'TestCommon', + 'test_python_ref'), + + # AssertionError: Tensor-likes are not close! 
+ DecorateInfo(unittest.skip("Expected: normal is not comparable"), + 'TestCommon', + 'test_out'), + DecorateInfo(unittest.skip("Expected: normal is not comparable"), + 'TestCommon', + 'test_out_warning'), + DecorateInfo(unittest.skip("Expected: normal is not comparable"), + 'TestCommon', + 'test_python_ref_torch_fallback'), + DecorateInfo(unittest.skip("Expected: normal is not comparable"), 'TestDecomp', 'test_comprehensive'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + DecorateInfo(unittest.skip("make_traced() doesn't set seed properly!"), 'TestCommon', 'test_python_ref_executor'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + ) + ), + PythonRefInfo( + "_refs.normal_", + op=torch.Tensor.normal_, + torch_opinfo_name="normal", + torch_opinfo_variant_name="in_place", + supports_out=False, + decorators=( + # TODO: RuntimeError: no _refs support for torch.rand_like + DecorateInfo(unittest.skip("TODO: RuntimeError: no _refs support for torch.rand_like"), + 'TestCommon', + 'test_python_ref'), + + # AssertionError: Tensor-likes are not close! 
+ DecorateInfo(unittest.skip("Expected: normal is not comparable"), + 'TestCommon', + 'test_out'), + DecorateInfo(unittest.skip("Expected: normal is not comparable"), + 'TestCommon', + 'test_out_warning'), + DecorateInfo(unittest.skip("Expected: normal is not comparable"), + 'TestCommon', + 'test_python_ref_torch_fallback'), + DecorateInfo(unittest.skip("Expected: normal is not comparable"), 'TestDecomp', 'test_comprehensive'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + DecorateInfo(unittest.skip("make_traced() doesn't set seed properly!"), 'TestCommon', 'test_python_ref_executor'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + ) + ), + PythonRefInfo( + "_refs.arange", + torch_opinfo_name="arange", + skips=( + # Tests that assume input is a tensor or sequence of tensors + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + ), + ), + PythonRefInfo( + "_refs.linspace", + torch_opinfo_name="linspace", + skips=( + # Tests that assume input is a tensor or sequence of tensors + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + + # cpu implementation is wrong on some integral types + # https://github.com/pytorch/pytorch/issues/81996 + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback', + dtypes=(torch.int8, torch.uint8, torch.int16, torch.int32, torch.int64), device_type="cpu"), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 
'test_python_ref', + dtypes=(torch.int8, torch.uint8, torch.int16, torch.int32, torch.int64), device_type="cpu"), + + # cuda implementation is off-by-one on some inputs due to precision issues + # https://github.com/pytorch/pytorch/issues/82230 + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback', + dtypes=(torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64), + device_type="cuda"), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref', + dtypes=(torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64), + device_type="cuda"), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor', + dtypes=(torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64), + device_type="cuda"), + ), + ), + PythonRefInfo( + "_refs.linspace", + torch_opinfo_name="linspace", + torch_opinfo_variant_name="tensor_overload", + skips=( + # TypeError: 'int' object is not subscriptable + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + + # cpu implementation is wrong on some integral types + # https://github.com/pytorch/pytorch/issues/81996 + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback', + dtypes=(torch.int8, torch.uint8, torch.int16, torch.int32, torch.int64), device_type="cpu"), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref', + dtypes=(torch.int8, torch.uint8, torch.int16, torch.int32, torch.int64), device_type="cpu"), + + # cuda implementation is off-by-one on some inputs due to precision issues + # https://github.com/pytorch/pytorch/issues/82230 + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback', + dtypes=(torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64), + device_type="cuda"), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref', + dtypes=(torch.uint8, 
torch.int8, torch.int16, torch.int32, torch.int64), + device_type="cuda"), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor', + dtypes=(torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64), + device_type="cuda"), + ), + ), + PythonRefInfo( + "_refs.logspace", + torch_opinfo_name="logspace", + skips=( + # Tests that assume input is a tensor or sequence of tensors + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + + # Off-by-one issue when casting floats to ints + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback', + dtypes=(torch.int16, torch.int32, torch.int64), + device_type="cuda"), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref', + dtypes=(torch.int16, torch.int32, torch.int64), + device_type="cuda"), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor', + dtypes=(torch.int16, torch.int32, torch.int64), + device_type="cuda"), + ), + ), + PythonRefInfo( + "_refs.logspace", + torch_opinfo_name="logspace", + torch_opinfo_variant_name="tensor_overload", + skips=( + # TypeError: 'int' object is not subscriptable + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + + # Off-by-one issue when casting floats to ints + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback', + dtypes=(torch.int16, torch.int32, torch.int64), + device_type="cuda"), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref', + dtypes=(torch.int16, torch.int32, torch.int64), + device_type="cuda"), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor', + dtypes=(torch.int16, torch.int32, torch.int64), + 
device_type="cuda"), + ), + ), + PythonRefInfo( + "_refs.meshgrid", + torch_opinfo_name="meshgrid", + torch_opinfo_variant_name="variadic_tensors", + ), + PythonRefInfo( + "_refs.take_along_dim", + torch_opinfo_name="take_along_dim", + skips=( + DecorateInfo(unittest.expectedFailure, + 'TestCommon', + 'test_python_ref'), + ), + ), + PythonRefInfo( + "_refs.to", + torch_opinfo_name="to", + ), + PythonRefInfo( + "_refs.triu", + torch_opinfo_name="triu", + ), + PythonRefInfo( + "_refs.tril", + torch_opinfo_name="tril", + ), + PythonRefInfo( + "_refs.triu_indices", + torch_opinfo_name="triu_indices", + # the implementation uses torch.stack that violates view consistency + validate_view_consistency=False, + skips=( + # skip these tests since we have non tensor input + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_noncontiguous_samples'), + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.skip('Skipped!'), 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip('Skipped!'), 'TestMathBits', 'test_neg_view'), + )), + PythonRefInfo( + "_refs.tril_indices", + torch_opinfo_name="tril_indices", + # the implementation uses torch.stack that violates view consistency + validate_view_consistency=False, + skips=( + # skip these tests since we have non tensor input + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_noncontiguous_samples'), + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.skip('Skipped!'), 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip('Skipped!'), 'TestMathBits', 'test_neg_view'), + )), + PythonRefInfo( + "_refs.meshgrid", + torch_opinfo_name="meshgrid", + torch_opinfo_variant_name="list_of_tensors", + ), + PythonRefInfo( + "_refs.movedim", + aliases=('moveaxis',), + torch_opinfo_name="movedim", + ), + PythonRefInfo( + "_refs.bucketize", + torch_opinfo_name="bucketize", + 
skips=( + # RuntimeError: It appears that you're trying to get value out of a tracing tensor with + # aten._local_scalar_dense.default - erroring out! [...] + # triggered by mid_val = boundaries[mid] + DecorateInfo(unittest.expectedFailure, "TestCommon", "test_python_ref_executor"), + ) + ), + PythonRefInfo( + "_refs.equal", + torch_opinfo_name="equal", + skips=( + # RuntimeError: Cannot cast FakeTensor to number + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_meta',), + ) + ), + ElementwiseUnaryPythonRefInfo( + "_refs.atan", + torch_opinfo_name="atan", + decorators=(precisionOverride({torch.bfloat16: 1e-2}),), + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_small', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cuda', dtypes=[torch.cfloat, torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cuda', dtypes=[torch.cfloat, torch.cdouble], + active_if=IS_WINDOWS), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.atanh", + torch_opinfo_name="atanh", + decorators=(precisionOverride({torch.bfloat16: 1e-2}),), + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_small', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + 
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cuda', dtypes=[torch.cfloat, torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cuda', dtypes=[torch.cfloat], + active_if=IS_WINDOWS), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.bitwise_not", + torch_opinfo_name="bitwise_not", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.ceil", + torch_opinfo_name="ceil", + # Fails on int32 + # https://github.com/pytorch/pytorch/issues/85258 + ), + PythonRefInfo( + "_refs.item", + torch_opinfo_name="item", + skips=( + # RuntimeError: Cannot cast FakeTensor(FakeTensor(..., device='meta', size=()), cpu) to number + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_meta'), + # ValueError: Can't convert a tensor with 10 elements to a number! 
+ DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'),), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.conj_physical", + torch_opinfo_name="conj_physical", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.cos", + torch_opinfo_name="cos", + decorators=(precisionOverride({torch.bfloat16: 1e-2}),), + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + dtypes=(torch.cfloat, torch.cdouble,), device_type='cpu', + active_if=IS_WINDOWS), + # This fails on CUDA but passes on ROCm + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + dtypes=(torch.cdouble,), device_type='cuda'), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cpu', + dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS), + # AssertionError: Tensor-likes are not close! 
+ # Greatest absolute difference: nan at index (700,) (up to 1e-05 allowed) + # Greatest relative difference: nan at index (700,) (up to 0.001 allowed) + DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cuda', + dtypes=(torch.chalf,), active_if=IS_WINDOWS), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.cosh", + torch_opinfo_name="cosh", + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/48641 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.int8]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + dtypes=[torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cpu', + dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cpu', + dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS), + # AssertionError: Tensor-likes are not close! 
+ # Greatest absolute difference: nan at index (6000,) (up to 1e-05 allowed) + # Greatest relative difference: nan at index (6000,) (up to 0.001 allowed) + DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cuda', + dtypes=(torch.chalf,), active_if=IS_WINDOWS), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.digamma", + torch_opinfo_name="digamma", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.erf", + torch_opinfo_name="erf", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.erfinv", + torch_opinfo_name="erfinv", + decorators=(precisionOverride({torch.float16: 1e-2, + torch.bfloat16: 1e-2, + torch.float32: 1e-4}),), + skips=( + # Reference: https://github.com/pytorch/pytorch/pull/49155#issuecomment-742664611 + DecorateInfo( + unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + active_if=TEST_SCIPY and version.parse(scipy.__version__) < version.parse("1.4.0")), + DecorateInfo( + unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + active_if=TEST_SCIPY and version.parse(scipy.__version__) < version.parse("1.4.0")), + DecorateInfo( + unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_small', + active_if=TEST_SCIPY and version.parse(scipy.__version__) < version.parse("1.4.0")), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.erfc", + torch_opinfo_name="erfc", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.exp", + torch_opinfo_name="exp", + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/48010 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.expm1", + torch_opinfo_name="expm1", + ), + 
ElementwiseUnaryPythonRefInfo( + "_refs.exp2", + torch_opinfo_name="exp2", + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + dtypes=[torch.cdouble]), + # Reference: https://github.com/pytorch/pytorch/issues/48010 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.fill", + torch_opinfo_name="fill", + supports_out=True, + ), + ElementwiseUnaryPythonRefInfo( + "_refs.floor", + torch_opinfo_name="floor", + # Fails on int32 + # https://github.com/pytorch/pytorch/issues/85258 + ), + ElementwiseUnaryPythonRefInfo( + "_refs.frexp", + torch_opinfo_name="frexp", + # Skipped due to numerical failures on Windows CI. + # This is also skipped in frexp earlier in the file. 
+ skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + active_if=IS_WINDOWS), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.frac", + torch_opinfo_name="frac", + skips=( + DecorateInfo( + unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + dtypes=(torch.bfloat16, torch.float16, torch.float32, torch.float64)), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.imag", + torch_opinfo_name="imag", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.isfinite", + torch_opinfo_name="isfinite", + supports_out=True, + ), + ElementwiseUnaryPythonRefInfo( + "_refs.isinf", + torch_opinfo_name="isinf", + supports_out=True, + ), + ElementwiseUnaryPythonRefInfo( + "_refs.isposinf", + torch_opinfo_name="isposinf", + supports_out=True, + ), + ElementwiseUnaryPythonRefInfo( + "_refs.isneginf", + torch_opinfo_name="isneginf", + supports_out=True, + ), + ElementwiseUnaryPythonRefInfo( + "_refs.isnan", + torch_opinfo_name="isnan", + supports_out=True, + ), + ElementwiseUnaryPythonRefInfo( + "_refs.isreal", + torch_opinfo_name="isreal", + supports_out=True, + ), + ElementwiseUnaryPythonRefInfo( + "_refs.i0", + torch_opinfo_name="i0", + decorators=(precisionOverride({torch.bfloat16: 3e-1, + torch.float16: 5e-1}),), + skips=( + DecorateInfo(unittest.skip("Skipped!"), + 'TestUnaryUfuncs', + 'test_reference_numerics_large', + dtypes=(torch.int8,)), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.lgamma", + torch_opinfo_name="lgamma", + decorators=(precisionOverride({torch.float16: 7e-1}),), + skips=( + # Reference: https://github.com/pytorch/pytorch/pull/50140#issuecomment-756150214 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + dtypes=[torch.float32, torch.float64], active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + dtypes=[torch.float32, torch.float64], active_if=IS_WINDOWS), 
+ ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.special.multigammaln", + torch_opinfo_name="mvlgamma", + torch_opinfo_variant_name="mvlgamma_p_1", + skips=skips_mvlgamma(), + decorators=( + DecorateInfo(torch.testing._internal.common_utils.markDynamoStrictTest, 'TestUnaryUfuncs', + 'test_reference_numerics_large'), + DecorateInfo(torch.testing._internal.common_utils.xfailIfTorchDynamo, 'TestUnaryUfuncs', + 'test_reference_numerics_large'), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.special.multigammaln", + torch_opinfo_name="mvlgamma", + torch_opinfo_variant_name="mvlgamma_p_3", + skips=skips_mvlgamma(), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.special.multigammaln", + torch_opinfo_name="mvlgamma", + torch_opinfo_variant_name="mvlgamma_p_5", + skips=skips_mvlgamma(), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.log", + torch_opinfo_name="log", + decorators=(precisionOverride({torch.bfloat16: 5e-2}),), + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], + active_if=IS_WINDOWS), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.log1p", + torch_opinfo_name="log1p", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.log10", + torch_opinfo_name="log10", + decorators=(precisionOverride({torch.bfloat16: 5e-2}),), + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], + active_if=IS_WINDOWS), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.log2", + torch_opinfo_name="log2", + decorators=(precisionOverride({torch.bfloat16: 1e-1}),), + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + dtypes=[torch.cfloat, torch.cdouble]), + ), + ), + PythonRefInfo( + "_refs.logsumexp", + torch_opinfo_name="logsumexp", + # When keepdim=False logsumexp function uses squeeze operation + # 
that is not yet exposed in nvFuser's Python API. + ), + PythonRefInfo( + "_refs.log_softmax", + torch_opinfo_name="log_softmax", + torch_opinfo_variant_name="with_dtype", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.nan_to_num", + torch_opinfo_name="nan_to_num", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.neg", + torch_opinfo_name="neg", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.positive", + torch_opinfo_name="positive", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.real", + torch_opinfo_name="real", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.reciprocal", + torch_opinfo_name="reciprocal", + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/45690 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + dtypes=[torch.cfloat, torch.cdouble]), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.round", + torch_opinfo_name="round", + # Fails on int32 + # https://github.com/pytorch/pytorch/issues/85258 + skips=( + DecorateInfo(toleranceOverride({torch.bfloat16: tol(atol=1e-3, rtol=0.016)}), + "TestUnaryUfuncs", "test_reference_numerics_extremal", + device_type="cuda"), + DecorateInfo(toleranceOverride({torch.bfloat16: tol(atol=1e-3, rtol=0.016)}), + "TestUnaryUfuncs", "test_reference_numerics_normal", + device_type="cuda"), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.rsqrt", + torch_opinfo_name="rsqrt", + decorators=(precisionOverride({torch.half: 5e-2}),), + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + dtypes=(torch.cfloat, torch.cdouble)), + # AssertionError: Tensor-likes are not close! 
+ # Greatest absolute difference: nan at index (700,) (up to 0.01 allowed) + # Greatest relative difference: nan at index (700,) (up to 0.001 allowed) + DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', + 'test_reference_numerics_large', + dtypes=(torch.chalf,)), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.sigmoid", + torch_opinfo_name="sigmoid", + aliases=('_refs.special.expit',), + # Reference: https://github.com/pytorch/pytorch/issues/56012 + handles_complex_extremal_values=False, + handles_large_floats=False, + decorators=(precisionOverride({torch.float16: 1e-2, + torch.complex64: 1e-1, + torch.bfloat16: 1e-2}),), + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/56012 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + dtypes=[torch.complex64, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + dtypes=[torch.chalf, torch.complex64, torch.cdouble]) + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.sign", + torch_opinfo_name="sign", + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/41245 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + dtypes=[torch.bfloat16, torch.float16, torch.float32, + torch.float64]), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.sgn", + torch_opinfo_name="sgn", + # This is an issue with the vectorised abs on CPU + handles_complex_extremal_values=False, + handles_large_floats=False, + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/41245 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + dtypes=[torch.bfloat16, torch.float16, torch.float32, + torch.float64]), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.signbit", + torch_opinfo_name="signbit", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.sin", + torch_opinfo_name="sin", + 
decorators=(precisionOverride({torch.bfloat16: 1e-2}),), + skips=( + # Fails on CUDA but passes on ROCm + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + dtypes=(torch.cdouble,), device_type='cuda'), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + dtypes=(torch.cfloat, torch.cdouble,), device_type='cpu', + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + dtypes=(torch.cfloat, torch.cdouble,), device_type='cpu', + active_if=IS_WINDOWS), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.sinc", + torch_opinfo_name="sinc", + decorators=(precisionOverride({torch.bfloat16: 1e-2, + torch.float16: 1e-2}),), + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/49133 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_small', + dtypes=[torch.cfloat]), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.sinh", + torch_opinfo_name="sinh", + decorators=(precisionOverride({torch.float16: 1e-2}),), + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], + active_if=(IS_MACOS or IS_WINDOWS)), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], + active_if=(IS_MACOS or IS_WINDOWS)), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + dtypes=(torch.cdouble,)), + # Reference: https://github.com/pytorch/pytorch/issues/48641 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.int8]), + ), + ), + PythonRefInfo( + "_refs.softmax", + torch_opinfo_name="softmax", + torch_opinfo_variant_name="with_dtype", + ), + 
ElementwiseUnaryPythonRefInfo( + "_refs.sqrt", + torch_opinfo_name="sqrt", + decorators=( + precisionOverride({torch.bfloat16: 7e-2}), + DecorateInfo( + toleranceOverride({torch.chalf: tol(atol=1e-2, rtol=0)}), + 'TestUnaryUfuncs', 'test_reference_numerics_large'), + ), + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/47358 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cpu', dtypes=(torch.cfloat, torch.cdouble), + active_if=IS_MACOS), + # Reference: https://github.com/pytorch/pytorch/pull/47293#issuecomment-721774436 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + dtypes=(torch.bfloat16,)), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.square", + torch_opinfo_name="square", + decorators=(precisionOverride({torch.complex64: 3e-4, torch.bfloat16: 3e-1}),), + skips=( + # AssertionError: Reference result was farther (2.2417024338305655e-07) from the precise computation + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref_executor', dtypes=(torch.complex64,)), + # Reference: https://github.com/pytorch/pytorch/issues/52549 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cuda', dtypes=[torch.cfloat, torch.cdouble]), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.tan", + torch_opinfo_name="tan", + decorators=[ + DecorateInfo( + toleranceOverride({torch.complex64: tol(atol=1e-04, rtol=1e-05)}), + 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cuda'), + ], + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], + active_if=(IS_MACOS or IS_WINDOWS)), + 
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], + active_if=(IS_MACOS or IS_WINDOWS)), + ) + ), + ElementwiseUnaryPythonRefInfo( + "_refs.tanh", + torch_opinfo_name="tanh", + decorators=[ + DecorateInfo( + toleranceOverride({torch.complex64: tol(atol=1e-04, rtol=2e-05)}), + 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cuda'), + ], + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], + active_if=(IS_MACOS or IS_WINDOWS)), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], + active_if=(IS_MACOS or IS_WINDOWS)), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.trunc", + torch_opinfo_name="trunc", + # Fails on int32 + # https://github.com/pytorch/pytorch/issues/85258 + ), + PythonRefInfo( + "_refs.special.log_softmax", + torch_opinfo_name="log_softmax", # alias + torch_opinfo_variant_name="with_dtype", + supports_out=False, + ), + PythonRefInfo( + "_refs.special.softmax", + torch_opinfo_name="softmax", # alias + torch_opinfo_variant_name="with_dtype", + supports_out=False, + ), + # + # Elementwise Unary Special OpInfos + # + ElementwiseUnaryPythonRefInfo( + "_refs.special.logit", + torch_opinfo_name="logit", + ), + # + # Elementwise Unary nn.functional OpInfos + # + PythonRefInfo( + "_refs.nn.functional.alpha_dropout", + torch_opinfo_name="nn.functional.alpha_dropout", + decorators=( + DecorateInfo(unittest.skip("Expected: dropout is not comparable"), + 'TestCommon', + 'test_python_ref'), + # AssertionError: Tensor-likes are not close! 
+ DecorateInfo(unittest.skip("Expected: dropout is not comparable"), + 'TestCommon', + 'test_python_ref_torch_fallback'), + DecorateInfo(unittest.skip("Expected: dropout is not comparable"), + 'TestCommon', + 'test_python_ref_executor', device_type='cuda'), + # AssertionError: Tensor-likes are not close! + DecorateInfo(unittest.skip("Expected: dropout is not comparable"), + 'TestMathBits', + 'test_neg_view'), + # AssertionError: Tensor-likes are not close! + DecorateInfo(unittest.skip("Expected: dropout is not comparable"), + 'TestCommon', + 'test_compare_cpu'), + ) + ), + ElementwiseUnaryPythonRefInfo( + "_refs.nn.functional.celu", + torch_opinfo_name="nn.functional.celu", + supports_out=True, + ), + PythonRefInfo( + "_refs.nn.functional.channel_shuffle", + torch_opinfo_name="nn.functional.channel_shuffle", + supports_out=True, + ), + ElementwiseUnaryPythonRefInfo( + "_refs.nn.functional.threshold", + torch_opinfo_name="nn.functional.threshold", + supports_out=True, + ), + PythonRefInfo( + "_refs.nn.functional.dropout", + torch_opinfo_name="nn.functional.dropout", + decorators=( + DecorateInfo(unittest.skip("Expected: dropout is not comparable"), + 'TestCommon', + 'test_python_ref'), + DecorateInfo(unittest.skip("Expected: dropout is not comparable"), + 'TestCommon', + 'test_python_ref_torch_fallback'), + DecorateInfo(unittest.skip("Expected: dropout is not comparable"), + 'TestCommon', + 'test_out'), + DecorateInfo(unittest.skip("Expected: dropout is not comparable"), + 'TestCommon', + 'test_out_warning'), + DecorateInfo(unittest.skip("Expected: dropout is not comparable"), + 'TestMathBits', + 'test_conj_view'), + DecorateInfo(unittest.skip("Expected: dropout is not comparable"), + 'TestMathBits', + 'test_neg_conj_view'), + DecorateInfo(unittest.skip("Expected: dropout is not comparable"), + 'TestMathBits', + 'test_neg_view'), + # dropout is not comparable + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor'), + 
DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + ) + ), + ElementwiseUnaryPythonRefInfo( + "_refs.nn.functional.elu", + torch_opinfo_name="nn.functional.elu", + supports_out=True, + decorators=[ + DecorateInfo( + toleranceOverride({ + torch.float16: tol(atol=1e-03, rtol=1.2e-03), + torch.bfloat16: tol(atol=1e-03, rtol=1.2e-03) + }), + 'TestUnaryUfuncs', device_type='cuda', + ), ], + ), + ElementwiseUnaryPythonRefInfo( + "_refs.nn.functional.hardtanh", + torch_opinfo_name="nn.functional.hardtanh", + supports_out=True, + ), + PythonRefInfo( # TODO: Port this to an UnaryOpInfo + "_refs.nn.functional.gelu", + torch_opinfo_name="nn.functional.gelu", + ), + PythonRefInfo( + "_refs.nn.functional.layer_norm", + torch_opinfo_name="nn.functional.layer_norm", + skips=( + # Reference result was farther (3.5762786809723224e-07) from the precise computation + # than the torch result was (2.5068410824946596e-07)! + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref', + dtypes=(torch.float32,), device_type='cpu'), + ), + ), + PythonRefInfo( + "_refs.nn.functional.glu", + torch_opinfo_name="nn.functional.glu", + supports_out=True, + ), + PythonRefInfo( + "_refs.nn.functional.pairwise_distance", + torch_opinfo_name="nn.functional.pairwise_distance", + supports_out=True, + ), + PythonRefInfo( + "_refs.nn.functional.pdist", + torch_opinfo_name="nn.functional.pdist", + supports_out=True, + skips=( + # RunTimeError: no _refs support for torch.Tensor.index_select + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref'), + # Reference result was farther (1.946091651916504e-05) from the precise + # computation than the torch result was (1.1920928955078125e-06)! 
+ DecorateInfo( + unittest.expectedFailure, + 'TestCommon', + 'test_python_ref_torch_fallback', + dtypes=(torch.float32,), + device_type='cpu', + ), + )), + PythonRefInfo( + "_refs.nn.functional.leaky_relu", + torch_opinfo_name="nn.functional.leaky_relu", + supports_out=True, + ), + PythonRefInfo( + "_refs.nn.functional.log_softmax", + torch_opinfo_name="log_softmax", # alias + torch_opinfo_variant_name="with_dtype", + supports_out=False, + ), + PythonRefInfo( + "_refs.nn.functional.pixel_shuffle", + torch_opinfo_name="nn.functional.pixel_shuffle", + ), + PythonRefInfo( + "_refs.nn.functional.pixel_unshuffle", + torch_opinfo_name="nn.functional.pixel_unshuffle", + ), + PythonRefInfo( + "_refs.nn.functional.poisson_nll_loss", + torch_opinfo_name="nn.functional.poisson_nll_loss", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.nn.functional.prelu", + torch_opinfo_name="nn.functional.prelu", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.nn.functional.relu", + torch_opinfo_name="nn.functional.relu", + supports_out=True, + ), + ElementwiseUnaryPythonRefInfo( + "_refs.nn.functional.relu6", + torch_opinfo_name="nn.functional.relu6", + supports_out=True, + ), + ElementwiseUnaryPythonRefInfo( + "_refs.nn.functional.mish", + torch_opinfo_name="nn.functional.mish", + supports_out=True, + decorators=[ + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-03)}), + 'TestUnaryUfuncs',), ], + ), + ElementwiseUnaryPythonRefInfo( + "_refs.nn.functional.selu", + torch_opinfo_name="nn.functional.selu", + supports_out=True, + decorators=[ + DecorateInfo( + toleranceOverride({ + torch.float16: tol(atol=1e-2, rtol=1.8e-2), + torch.bfloat16: tol(atol=1e-2, rtol=1.8e-2) + }), + 'TestUnaryUfuncs', device_type='cuda', + ), ], + ), + PythonRefInfo( + "_refs.nn.functional.softmax", + torch_opinfo_name="softmax", # alias + torch_opinfo_variant_name="with_dtype", + supports_out=False, + ), + PythonRefInfo( + "_refs.nn.functional.softmin", + 
torch_opinfo_name="nn.functional.softmin", + torch_opinfo_variant_name="with_dtype", + supports_out=False, + ), + ElementwiseUnaryPythonRefInfo( + "_refs.nn.functional.softplus", + torch_opinfo_name="nn.functional.softplus", + ), + PythonRefInfo( + "_refs.nn.functional.l1_loss", + torch_opinfo_name="nn.functional.l1_loss", + ), + PythonRefInfo( + "_refs.nn.functional.margin_ranking_loss", + torch_opinfo_name="nn.functional.margin_ranking_loss", + ), + PythonRefInfo( + "_refs.nn.functional.mse_loss", + torch_opinfo_name="nn.functional.mse_loss", + ), + PythonRefInfo( + "_refs.nn.functional.smooth_l1_loss", + torch_opinfo_name="nn.functional.smooth_l1_loss", + ), + PythonRefInfo( + "_refs.nn.functional.hinge_embedding_loss", + torch_opinfo_name="nn.functional.hinge_embedding_loss", + skips=( + # Reference result was farther (0.29562714856322714) from the precise + # computation than the torch result was (0.20437285143677286)! + DecorateInfo( + unittest.expectedFailure, 'TestCommon', 'test_python_ref', + dtypes=(torch.bfloat16,), device_type="cpu" + ), + ), + ), + PythonRefInfo( + "_refs.nn.functional.nll_loss", + torch_opinfo_name="nn.functional.nll_loss", + # The corresponding PyTorch op doesn't support out. But the ref is + # registered as a decomp and ATen has an out variant. + supports_out=True, + # For simpler indexing, we flatten target indices, then reshape the result tensor. + # This creates inconsistent view state with reference impl. + validate_view_consistency=False, + skips=( + # RuntimeError: It appears that you're trying to get value out of a tracing tensor - erroring out! + DecorateInfo( + unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor', device_type="cuda" + ), + ), + ), + PythonRefInfo( + "_refs.nn.functional.huber_loss", + torch_opinfo_name="nn.functional.huber_loss", + # The corresponding PyTorch op doesn't support out. But the ref is + # registered as a decomp and ATen has an out variant. 
+ supports_out=True, + ), + ElementwiseUnaryPythonRefInfo( + "_refs.nn.functional.tanhshrink", + torch_opinfo_name="nn.functional.tanhshrink", + decorators=[ + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_normal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo( + toleranceOverride({torch.bfloat16: tol(atol=1e-02, rtol=1.6e-02), + torch.complex64: tol(atol=6e-04, rtol=1e-05)}), + 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cuda'), + ], + skips=( + # in each case, pytorch will produce a nan while numpy will not + DecorateInfo(unittest.skip("Fails on some jobs works on others!"), + 'TestUnaryUfuncs', "test_reference_numerics_large", + dtypes=(torch.complex64, torch.complex128), + active_if=(IS_MACOS)), + DecorateInfo(unittest.skip("Fails on some jobs works on others!"), + 'TestUnaryUfuncs', "test_reference_numerics_extremal", + dtypes=(torch.complex64, torch.complex128), + device_type='cpu', + active_if=(IS_MACOS or IS_WINDOWS)), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.nn.functional.hardshrink", + torch_opinfo_name="nn.functional.hardshrink", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.nn.functional.softshrink", + torch_opinfo_name="nn.functional.softshrink", + ), + # + # Elementwise Binary Reference OpInfos + # + ElementwiseBinaryPythonRefInfo( + "_refs.add", + torch_opinfo_name="add", + # https://github.com/pytorch/pytorch/issues/76944 + supports_two_python_scalars=True, + supports_one_python_scalar=True, + decorators=( + DecorateInfo( + toleranceOverride({torch.chalf: tol(atol=1e-2, rtol=0)}), + 'TestBinaryUfuncs', 'test_reference_numerics'), + ), + skips=( + DecorateInfo(unittest.skip("Skipped!"), + 'TestBinaryUfuncs', + 'test_reference_numerics_extremal_values', + dtypes=(torch.complex64, torch.complex128)), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.atan2", + torch_opinfo_name="atan2", + ), + ElementwiseBinaryPythonRefInfo( + "_refs.bitwise_and", 
+ torch_opinfo_name="bitwise_and", + ), + ElementwiseBinaryPythonRefInfo( + "_refs.bitwise_left_shift", + torch_opinfo_name="bitwise_left_shift", + skips=( + # https://github.com/pytorch/pytorch/issues/70904 + DecorateInfo(unittest.skip("Some inputs produce undefined outputs"), 'TestCommon', 'test_compare_cpu'), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.bitwise_right_shift", + torch_opinfo_name="bitwise_right_shift", + skips=( + # # https://github.com/pytorch/pytorch/issues/70904 + DecorateInfo(unittest.skip("Skipped some inputs produce undefined outputs"), 'TestCommon', 'test_compare_cpu'), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.bitwise_or", + torch_opinfo_name="bitwise_or", + ), + ElementwiseBinaryPythonRefInfo( + "_refs.bitwise_xor", + torch_opinfo_name="bitwise_xor", + ), + ElementwiseBinaryPythonRefInfo( + "_refs.copysign", + torch_opinfo_name="copysign", + skips=( + # RuntimeError: Expected divisor (b) to be on the same device (cuda:0) as dividend (a), but it is found on cpu! + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_type_promotion'), + # FIXME output 0: meta disagrees with real impl + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_binary_ufuncs_mixed_dtype'), + ) + ), + ElementwiseBinaryPythonRefInfo( + "_refs.div", + torch_opinfo_name="div", + torch_opinfo_variant_name="no_rounding_mode", + # https://github.com/pytorch/pytorch/issues/76944 + supports_two_python_scalars=True, + supports_one_python_scalar=True, + skips=( + # NotImplementedError: argument of type: + DecorateInfo( + unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref_executor', + dtypes=(torch.complex32, torch.complex64, torch.complex128,) + ), + # Reference result was farther (0.7433461727239705) from the precise + # computation than the torch result was (nan)! 
+ DecorateInfo( + unittest.expectedFailure, 'TestCommon', 'test_python_ref', + dtypes=(torch.complex32,), device_type="cuda" + ), + # Reference result was farther (0.7433461727239705) from the precise + # computation than the torch result was (nan)! + DecorateInfo( + unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback', + dtypes=(torch.complex32,), device_type="cuda" + ), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.div", + torch_opinfo_name="div", + torch_opinfo_variant_name="trunc_rounding", + # https://github.com/pytorch/pytorch/issues/76944 + supports_two_python_scalars=True, + supports_one_python_scalar=True, + decorators=( + # See https://github.com/pytorch/pytorch/issues/111126 + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.div", + torch_opinfo_name="div", + torch_opinfo_variant_name="floor_rounding", + # https://github.com/pytorch/pytorch/issues/76944 + supports_two_python_scalars=True, + supports_one_python_scalar=True, + decorators=( + # See https://github.com/pytorch/pytorch/issues/111126 + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'), + # Reference result was farther (nan) from the precise computation than the + # torch result was (inf)! 
+ DecorateInfo( + unittest.expectedFailure, + "TestCommon", + "test_python_ref", + dtypes=(torch.bfloat16,), + device_type="cpu", + ), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.eq", + torch_opinfo_name="eq", + ), + ElementwiseBinaryPythonRefInfo( + "_refs.float_power", + torch_opinfo_name="float_power", + skips=( + # Test doesn't account for float -> double type promotion + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'), + # Complex values error with: Greatest absolute difference: nan at index + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_reference_numerics_small_values', + dtypes=[torch.complex64, torch.complex128]), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_reference_numerics_large_values', + dtypes=[torch.complex64, torch.complex128]), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_reference_numerics_extremal_values', + dtypes=[torch.complex64, torch.complex128]), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.logaddexp", + torch_opinfo_name="logaddexp", + skips=( + # failure due to mismatch in edge cases, which boils down to what torch.exp(inf + infj) should be + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref', device_type='cpu', + dtypes=(torch.complex64, torch.complex128)), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback', device_type='cpu', + dtypes=(torch.complex64, torch.complex128)), + ), + ), + PythonRefInfo( + "_refs.logaddexp2", + torch_opinfo_name="logaddexp2", + ), + ElementwiseBinaryPythonRefInfo( + "_refs.floor_divide", + torch_opinfo_name="floor_divide", + rhs_make_tensor_kwargs=dict(exclude_zero=True), + # https://github.com/pytorch/pytorch/issues/76944 + supports_two_python_scalars=True, + supports_one_python_scalar=True, + # bfloat16 floor_divide compared with a float32 reference works inconsistently + skips=( + DecorateInfo(unittest.skip("Skipped!"), 
'TestCommon', 'test_python_ref', + dtypes=(torch.bfloat16,)), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref_torch_fallback', + dtypes=(torch.bfloat16,)), + # bfloat16 floor_divide compared with a float32 reference works inconsistently + DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs', + dtypes=(torch.bfloat16,)), + # int8 floor divide has different results for -128 // -1 vs. NumPy + DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs', + 'test_reference_numerics_small_values', + dtypes=(torch.int8,)), + # The following tests fails on some jobs + DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs', + 'test_reference_numerics_extremal_values', + dtypes=(torch.float16,)), + DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-3, rtol=5e-3)}), + 'TestBinaryUfuncs', 'test_reference_numerics'), + # FIXME output 0: meta disagrees with real impl + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_binary_ufuncs_mixed_dtype'), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.fmax", + torch_opinfo_name="fmax", + supports_rhs_python_scalar=False, + ), + ElementwiseBinaryPythonRefInfo( + "_refs.fmin", + torch_opinfo_name="fmin", + supports_rhs_python_scalar=False, + ), + ElementwiseBinaryPythonRefInfo( + "_refs.fmod", + torch_opinfo_name="fmod", + rhs_make_tensor_kwargs={'exclude_zero': True}, + supports_rhs_python_scalar=True, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref', + dtypes=(torch.bfloat16,), device_type='cpu'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref_torch_fallback', + dtypes=(torch.bfloat16,), device_type='cpu'), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_contig_vs_every_other', + dtypes=(torch.bfloat16,)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_non_contig', + dtypes=(torch.bfloat16,)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 
'test_reference_numerics', + dtypes=(torch.bfloat16,)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_reference_numerics_small_values', + dtypes=(torch.uint8,)), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.gcd", + torch_opinfo_name="gcd", + skips=( + DecorateInfo(unittest.expectedFailure, + 'TestBinaryUfuncs', + 'test_reference_numerics_small_values', + dtypes=(torch.int8,)), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.ge", + torch_opinfo_name="ge", + ), + ElementwiseBinaryPythonRefInfo( + "_refs.gt", + torch_opinfo_name="gt", + ), + ElementwiseBinaryPythonRefInfo( + "_refs.heaviside", + torch_opinfo_name="heaviside", + supports_rhs_python_scalar=False, + skips=( + # PyTorch's heaviside does not appear to propagate NaNs + DecorateInfo(unittest.skip("Skipped!"), + 'TestBinaryUfuncs', + 'test_reference_numerics_extremal_values'), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.hypot", + torch_opinfo_name="hypot", + supports_rhs_python_scalar=False, + ), + ElementwiseBinaryPythonRefInfo( + "_refs.igamma", + torch_opinfo_name="igamma", + ), + ElementwiseBinaryPythonRefInfo( + "_refs.igammac", + torch_opinfo_name="igammac", + ), + ElementwiseBinaryPythonRefInfo( + "_refs.isclose", + torch_opinfo_name="isclose", + skips=( + # Intentional xfail -- isclose does not type promote + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_binary_ufuncs_mixed_dtype'), + DecorateInfo(unittest.skip("Skipped!"), + 'TestBinaryUfuncs', + 'test_reference_numerics_extremal_values'), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.lcm", + torch_opinfo_name="lcm", + ), + ElementwiseBinaryPythonRefInfo( + "_refs.le", + torch_opinfo_name="le", + ), + ElementwiseBinaryPythonRefInfo( + "_refs.logical_and", + torch_opinfo_name="logical_and", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.logical_not", + torch_opinfo_name="logical_not", + ), + 
ElementwiseBinaryPythonRefInfo( + "_refs.logical_or", + torch_opinfo_name="logical_or", + ), + ElementwiseBinaryPythonRefInfo( + "_refs.logical_xor", + torch_opinfo_name="logical_xor", + ), + ElementwiseBinaryPythonRefInfo( + "_refs.lt", + torch_opinfo_name="lt", + ), + ElementwiseBinaryPythonRefInfo( + "_refs.maximum", + torch_opinfo_name="maximum", + skips=( + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.minimum", + torch_opinfo_name="minimum", + skips=( + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.mul", + torch_opinfo_name="mul", + # https://github.com/pytorch/pytorch/issues/76944 + supports_two_python_scalars=True, + supports_one_python_scalar=True, + skips=( + # Reference result was farther (0.0) from the precise computation + # than the torch result was (nan)! + DecorateInfo( + unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor', + dtypes=(torch.complex32,), + ), + # Reference result was farther (0.0) from the precise computation + # than the torch result was (nan)! + DecorateInfo( + unittest.expectedFailure, 'TestCommon', 'test_python_ref', + dtypes=(torch.complex32,), device_type='cuda' + ), + # Reference result was farther (0.0) from the precise computation + # than the torch result was (nan)! 
+ DecorateInfo( + unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback', + dtypes=(torch.complex32,), device_type='cuda' + ), + ) + ), + ElementwiseBinaryPythonRefInfo( + "_refs.ne", + torch_opinfo_name="ne", + ), + ElementwiseBinaryPythonRefInfo( + "_refs.nextafter", + torch_opinfo_name="nextafter", + ), + ElementwiseBinaryPythonRefInfo( + "_refs.pow", + torch_opinfo_name="pow", + decorators=( + DecorateInfo( + toleranceOverride({torch.complex64: tol(atol=1e-4, rtol=1.3e-05)}), + 'TestBinaryUfuncs', 'test_reference_numerics'), + DecorateInfo( + toleranceOverride({torch.complex64: tol(atol=1e-4, rtol=1.3e-05), + torch.complex128: tol(atol=1e-4, rtol=1.3e-05)}), + 'TestBinaryUfuncs', 'test_scalar_support'), + ), + skips=( + # Reference result was farther (inf) from the precise + # computation than the torch result was (nan)! + DecorateInfo( + unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor', + dtypes=(torch.complex32,), + ), + # Reference result was farther (inf) from the precise + # computation than the torch result was (nan)! + DecorateInfo( + unittest.expectedFailure, 'TestCommon', 'test_python_ref', + dtypes=(torch.complex32,), device_type="cuda" + ), + # Reference result was farther (inf) from the precise + # computation than the torch result was (nan)! 
+ DecorateInfo( + unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback', + dtypes=(torch.complex32,), device_type="cuda" + ), + # Skipping integers because they are being raised to negative powers causing an error + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', + 'test_reference_numerics_small_values', + dtypes=[torch.int8, torch.int16, torch.int32, torch.int64]), + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', + 'test_reference_numerics_large_values', + dtypes=[torch.int16, torch.int32, torch.int64]), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_reference_numerics', + dtypes=(torch.complex32,)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_reference_numerics_small_values', + dtypes=(torch.complex32, torch.complex64, torch.complex128)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_reference_numerics_large_values', + dtypes=(torch.complex32, torch.complex64, torch.complex128)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_reference_numerics_extremal_values', + dtypes=(torch.complex32, torch.complex64, torch.complex128)), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.remainder", + torch_opinfo_name="remainder", + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref', + dtypes=(torch.bfloat16,), device_type='cpu'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref_torch_fallback', + dtypes=(torch.bfloat16,), device_type='cpu'), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_reference_numerics', + dtypes=(torch.bfloat16,)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_reference_numerics_small_values', + dtypes=(torch.uint8,)), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.rsub", + torch_opinfo_name="rsub", + # https://github.com/pytorch/pytorch/issues/76944 + skips=( + # Reference result was farther (nan) from the 
precise computation than + # the torch result was (nan)! + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref', + dtypes=(torch.chalf,), device_type='cpu'), + # Reference result was farther (nan) from the precise computation than + # the torch result was (nan)! + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback', + dtypes=(torch.chalf,), device_type='cpu'), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.sub", + torch_opinfo_name="sub", + # https://github.com/pytorch/pytorch/issues/76944 + supports_two_python_scalars=True, + supports_one_python_scalar=True, + decorators=( + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-2, rtol=0), + torch.bfloat16: tol(atol=1e-5, rtol=5e-3), + torch.complex32: tol(atol=1e-5, rtol=1e-3)}), + 'TestBinaryUfuncs', 'test_reference_numerics'), + DecorateInfo( + toleranceOverride({torch.chalf: tol(atol=1e-2, rtol=0)}), + 'TestCommon', 'test_complex_half_reference_testing', device_type='cpu'), + DecorateInfo( + toleranceOverride({torch.chalf: tol(atol=5e-3, rtol=0)}), + 'TestDecomp', 'test_comprehensive', device_type='cpu'), + DecorateInfo( + toleranceOverride({torch.chalf: tol(atol=5e-3, rtol=0)}), + 'TestDecomp', 'test_quick', device_type='cpu'), + ), + skips=( + DecorateInfo(unittest.skip("Skipped!"), + 'TestBinaryUfuncs', + 'test_reference_numerics', + dtypes=(torch.uint8,)), + DecorateInfo(unittest.skip("Skipped!"), + 'TestBinaryUfuncs', + 'test_reference_numerics_small_values', + dtypes=(torch.uint8,)), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.true_divide", + torch_opinfo_name="true_divide", + # https://github.com/pytorch/pytorch/issues/76944 + supports_two_python_scalars=True, + supports_one_python_scalar=True, + skips=( + # Reference result was farther (0.7433461727239705) from the precise + # computation than the torch result was (nan)! 
+ DecorateInfo( + unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor', + dtypes=(torch.complex32,), + ), + # Reference result was farther (0.7433461727239705) from the precise + # computation than the torch result was (nan)! + DecorateInfo( + unittest.expectedFailure, 'TestCommon', 'test_python_ref', + dtypes=(torch.complex32,), device_type="cuda" + ), + # Reference result was farther (0.7433461727239705) from the precise + # computation than the torch result was (nan)! + DecorateInfo( + unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback', + dtypes=(torch.complex32,), device_type="cuda" + ), + ), + ), + # + # Elementwise Ternary Reference OpInfos + # + PythonRefInfo( + "_refs.addcdiv", + torch_opinfo_name="addcdiv", + ), + PythonRefInfo( + "_refs.addcmul", + torch_opinfo_name="addcmul", + skips=( + # Reference result was farther (1.3343989849090576e-05) + # from the precise computation than the torch result + # was (9.592622518539429e-06)! + # FIXME: enable dtype-based tolerances in test_ops.py:TestCommon._ref_test_helper + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref', + dtypes=(torch.float16,), device_type="cpu"), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref_torch_fallback', + dtypes=(torch.float16,), device_type="cpu"), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.clamp_min", + torch_opinfo_name="clamp_min", + skips=( + # test error disabled since rhs non-tensor python scalar is supported + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.clamp_max", + torch_opinfo_name="clamp_max", + skips=( + # test error disabled since rhs non-tensor python scalar is supported + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'), + ), + ), + PythonRefInfo( + "_refs.clamp", + torch_opinfo_name="clamp", + ), + PythonRefInfo( + "_refs.nn.functional.triplet_margin_loss", + 
torch_opinfo_name="nn.functional.triplet_margin_loss", + supports_out=False, + # TODO: Uses minimum and clamp + skips=( + # AssertionError: Tensor-likes are not close! + # Greatest absolute difference: 6.103515625e-05 at index (4,) (up to 1e-05 allowed) + # Greatest relative difference: 8.519846983548175e-06 at index (4,) (up to 1.3e-06 allowed) + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref', + dtypes=(torch.uint8,), device_type="cpu"), + ) + ), + ElementwiseBinaryPythonRefInfo( + "_refs.xlogy", + torch_opinfo_name="xlogy", + supports_one_python_scalar=True, + ), + # + # Elementwise Binary Special OpInfos + # + ElementwiseBinaryPythonRefInfo( + "_refs.special.xlog1py", + torch_opinfo_name="special.xlog1py", + supports_one_python_scalar=True, + ), + # + # Data Conversion & Data Movement Opinfos + # + ElementwiseUnaryPythonRefInfo( + "_refs._conversions.bfloat16", + torch_opinfo_name="bfloat16", + # TODO: If self already has the correct dtype and device, then self is + # returned ignoring memory_format. + # https://github.com/pytorch/pytorch/issues/86558 + validate_view_consistency=False, + ), + ElementwiseUnaryPythonRefInfo( + "_refs._conversions.bool", + torch_opinfo_name="bool", + # TODO: If self already has the correct dtype and device, then self is + # returned ignoring memory_format. + # https://github.com/pytorch/pytorch/issues/86558 + validate_view_consistency=False, + ), + ElementwiseUnaryPythonRefInfo( + "_refs._conversions.byte", + torch_opinfo_name="byte", + # TODO: If self already has the correct dtype and device, then self is + # returned ignoring memory_format. 
+ # https://github.com/pytorch/pytorch/issues/86558 + validate_view_consistency=False, + skips=( + DecorateInfo(unittest.skip('Overflow when downcasting signed type is undefined'), 'TestCommon', 'test_compare_cpu'), + ) + ), + ElementwiseUnaryPythonRefInfo( + "_refs._conversions.char", + torch_opinfo_name="char", + # TODO: If self already has the correct dtype and device, then self is + # returned ignoring memory_format. + # https://github.com/pytorch/pytorch/issues/86558 + validate_view_consistency=False, + skips=( + DecorateInfo(unittest.skip('Overflow when downcasting signed type is undefined'), 'TestCommon', 'test_compare_cpu'), + ) + ), + ElementwiseBinaryPythonRefInfo( + "_refs._conversions.complex", + torch_opinfo_name="complex", + error_inputs_func=partial(error_inputs_complex, is_ref=True), + skips=( + # Tests don't account for complex's type promotion semantics + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_binary_ufuncs_mixed_dtype'), + ) + ), + ElementwiseBinaryPythonRefInfo( + "_refs._conversions.polar", + torch_opinfo_name="polar", + skips=( + # Tests don't account for complex's type promotion semantics + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_binary_ufuncs_mixed_dtype'), + ) + ), + ElementwiseUnaryPythonRefInfo( + "_refs._conversions.double", + torch_opinfo_name="double", + # TODO: If self already has the correct dtype and device, then self is + # returned ignoring memory_format. + # https://github.com/pytorch/pytorch/issues/86558 + validate_view_consistency=False, + ), + ElementwiseUnaryPythonRefInfo( + "_refs._conversions.float", + torch_opinfo_name="float", + # TODO: If self already has the correct dtype and device, then self is + # returned ignoring memory_format. 
+ # https://github.com/pytorch/pytorch/issues/86558 + validate_view_consistency=False, + ), + ElementwiseUnaryPythonRefInfo( + "_refs._conversions.half", + torch_opinfo_name="half", + # TODO: If self already has the correct dtype and device, then self is + # returned ignoring memory_format. + # https://github.com/pytorch/pytorch/issues/86558 + validate_view_consistency=False, + ), + ElementwiseUnaryPythonRefInfo( + "_refs._conversions.int", + torch_opinfo_name="int", + # TODO: If self already has the correct dtype and device, then self is + # returned ignoring memory_format. + # https://github.com/pytorch/pytorch/issues/86558 + validate_view_consistency=False, + skips=( + DecorateInfo(unittest.skip('Overflow when downcasting signed type is undefined'), 'TestCommon', 'test_compare_cpu'), + ) + ), + ElementwiseUnaryPythonRefInfo( + "_refs._conversions.long", + torch_opinfo_name="long", + # TODO: If self already has the correct dtype and device, then self is + # returned ignoring memory_format. + # https://github.com/pytorch/pytorch/issues/86558 + validate_view_consistency=False, + skips=( + DecorateInfo(unittest.skip('Overflow when downcasting signed type is undefined'), 'TestCommon', 'test_compare_cpu'), + ) + ), + ElementwiseUnaryPythonRefInfo( + "_refs._conversions.short", + torch_opinfo_name="short", + # TODO: If self already has the correct dtype and device, then self is + # returned ignoring memory_format. + # https://github.com/pytorch/pytorch/issues/86558 + validate_view_consistency=False, + skips=( + DecorateInfo(unittest.skip('Overflow when downcasting signed type is undefined'), 'TestCommon', 'test_compare_cpu'), + ) + ), + ElementwiseUnaryPythonRefInfo( + "_refs._conversions.chalf", + torch_opinfo_name="chalf", + # TODO: If self already has the correct dtype and device, then self is + # returned ignoring memory_format. 
+ # https://github.com/pytorch/pytorch/issues/86558 + validate_view_consistency=False, + ), + ElementwiseUnaryPythonRefInfo( + "_refs._conversions.cfloat", + torch_opinfo_name="cfloat", + # TODO: If self already has the correct dtype and device, then self is + # returned ignoring memory_format. + # https://github.com/pytorch/pytorch/issues/86558 + validate_view_consistency=False, + ), + ElementwiseUnaryPythonRefInfo( + "_refs._conversions.cdouble", + torch_opinfo_name="cdouble", + # TODO: If self already has the correct dtype and device, then self is + # returned ignoring memory_format. + # https://github.com/pytorch/pytorch/issues/86558 + validate_view_consistency=False, + ), + PythonRefInfo( + "_refs.clone", + torch_opinfo_name="clone", + ), + # + # View & Shape OpInfos + # + PythonRefInfo( + "_refs.alias_copy", + torch_opinfo_name="alias_copy", + supports_out=True, + ), + PythonRefInfo( + "_refs.atleast_1d", + torch_opinfo_name="atleast_1d", + validate_view_consistency=False, + ), + PythonRefInfo( + "_refs.atleast_2d", + torch_opinfo_name="atleast_2d", + validate_view_consistency=False, + ), + PythonRefInfo( + "_refs.atleast_3d", + torch_opinfo_name="atleast_3d", + validate_view_consistency=False, + ), + PythonRefInfo( + "_refs.as_strided", + torch_opinfo_name="as_strided", + # FIXME: doesn't support chalf + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + skips=( + # cloned_mutable_input.is_same(returned_output) INTERNAL ASSERT FAILED + DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_neg_conj_view'), + ), + ), + PythonRefInfo( + "_refs.as_strided_copy", + torch_opinfo_name="as_strided_copy", + supports_out=True, + # FIXME: doesn't support chalf + 
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + skips=( + # cloned_mutable_input.is_same(returned_output) INTERNAL ASSERT FAILED + DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_neg_conj_view'), + # The view function this decompose into does not have a ref + DecorateInfo(unittest.expectedFailure, "TestCommon", "test_python_ref"), + ), + ), + PythonRefInfo( + "_refs.as_strided", + torch_opinfo_name="as_strided", + torch_opinfo_variant_name="partial_views", + # FIXME: doesn't support chalf + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + skips=( + # cloned_mutable_input.is_same(returned_output) INTERNAL ASSERT FAILED + DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_neg_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_compare_cpu'), + ), + ), + PythonRefInfo( + "_refs.as_strided_scatter", + torch_opinfo_name="as_strided_scatter", + # returns a view of an intermediate tensor (as_strided) + validate_view_consistency=False, + ), + PythonRefInfo( + "_refs.block_diag", + torch_opinfo_name="block_diag", + ), + PythonRefInfo( + "_refs.broadcast_shapes", + torch_opinfo_name="broadcast_shapes", + ), + PythonRefInfo( + "_refs.broadcast_tensors", + torch_opinfo_name="broadcast_tensors", + ), + PythonRefInfo( + "_refs.broadcast_to", + torch_opinfo_name="broadcast_to", + ), + PythonRefInfo( + "_refs.cat", + torch_opinfo_name="cat", + skips=( + # FIXME: AssertionError: RuntimeError not 
raised + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'), + ), + ), + PythonRefInfo( + "_refs.chunk", + torch_opinfo_name="chunk", + ), + PythonRefInfo( + "_refs.column_stack", + torch_opinfo_name="column_stack", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.conj", + torch_opinfo_name="conj", + ), + PythonRefInfo( + "_refs.constant_pad_nd", + torch_opinfo_name="constant_pad_nd", + ), + PythonRefInfo( + "_refs.contiguous", + torch_opinfo_name="contiguous", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.deg2rad", + torch_opinfo_name="deg2rad", + decorators=(precisionOverride({torch.bfloat16: 7e-1, + torch.float16: 7e-1}),), + ), + PythonRefInfo( + "_refs.dsplit", + torch_opinfo_name="dsplit", + ), + PythonRefInfo( + "_refs.diag", + torch_opinfo_name="diag", + ), + PythonRefInfo( + "_refs.diagonal", + torch_opinfo_name="diagonal", + ), + PythonRefInfo( + "_refs.diagonal_copy", + torch_opinfo_name="diagonal_copy", + supports_out=True, + ), + PythonRefInfo( + "_refs.diagonal_scatter", + torch_opinfo_name="diagonal_scatter", + supports_out=True, + # returns a view of an intermediate tensor (as_strided) + validate_view_consistency=False, + ), + PythonRefInfo( + "_refs.diag_embed", + torch_opinfo_name="diag_embed", + supports_out=True, + ), + PythonRefInfo( + "_refs.dstack", + torch_opinfo_name="dstack", + skips=( + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'), + ), + ), + PythonRefInfo( + "_refs.expand", + torch_opinfo_name="expand", + ), + PythonRefInfo( + "_refs.expand_as", + torch_opinfo_name="expand_as", + ), + PythonRefInfo( + "_refs.expand_copy", + torch_opinfo_name="expand_copy", + supports_out=True, + ), + PythonRefInfo( + "_refs.flatten", + torch_opinfo_name="flatten", + ), + PythonRefInfo( + "_refs.flip", + torch_opinfo_name="flip", + ), + PythonRefInfo( + "_refs.fliplr", + torch_opinfo_name="fliplr", + ), + PythonRefInfo( + "_refs.flipud", + torch_opinfo_name="flipud", + ), + PythonRefInfo( + 
"_refs.hstack", + torch_opinfo_name="hstack", + skips=( + # https://github.com/pytorch/pytorch/issues/78613 + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'), + ), + ), + PythonRefInfo( + "_refs.narrow", + torch_opinfo_name="narrow", + error_inputs_func=partial(error_inputs_narrow_narrow_copy, is_narrow=True, is_ref=True), + ), + PythonRefInfo( + "_refs.narrow_copy", + torch_opinfo_name="narrow_copy", + supports_out=True, + error_inputs_func=partial(error_inputs_narrow_narrow_copy, is_narrow=False, is_ref=True), + skips=( + # The view function this decompose into does not have a ref + DecorateInfo(unittest.expectedFailure, "TestCommon", "test_python_ref"), + ), + ), + PythonRefInfo( + "_refs.nn.functional.group_norm", + torch_opinfo_name="nn.functional.group_norm", + validate_view_consistency=False, + ), + PythonRefInfo( + "_refs.native_layer_norm", + torch_opinfo_name="native_layer_norm", + skips=( + DecorateInfo(unittest.skip("Skipped!"), "TestCommon", "test_python_ref", + device_type="cpu", dtypes=(torch.float32,)), + DecorateInfo(unittest.skip("Skipped!"), "TestCommon", "test_python_ref_torch_fallback", + device_type="cpu", dtypes=(torch.float32,)), + ), + ), + PythonRefInfo( + "_refs.permute", + torch_opinfo_name="permute", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.rad2deg", + torch_opinfo_name="rad2deg", + decorators=(precisionOverride({torch.bfloat16: 7e-1, + torch.float16: 7e-1}),), + ), + PythonRefInfo( + "_refs.ravel", + torch_opinfo_name="ravel", + ), + PythonRefInfo( + "_refs.renorm", + torch_opinfo_name="renorm", + ), + PythonRefInfo( + "_refs.repeat", + torch_opinfo_name="repeat", + validate_view_consistency=False, + ), + PythonRefInfo( + "_refs.reshape", + torch_opinfo_name="reshape", + ), + PythonRefInfo( + "_refs.reshape_as", + torch_opinfo_name="reshape_as", + ), + PythonRefInfo( + "_refs.roll", + torch_opinfo_name="roll", + validate_view_consistency=False, + ), + PythonRefInfo( + "_refs.rot90", + 
torch_opinfo_name="rot90", + validate_view_consistency=False, + ), + PythonRefInfo( + "_refs.select_scatter", + torch_opinfo_name="select_scatter", + ), + PythonRefInfo( + "_refs.stack", + torch_opinfo_name="stack", + validate_view_consistency=False, + ), + PythonRefInfo( + "_refs.squeeze", + torch_opinfo_name="squeeze", + ), + PythonRefInfo( + "_refs.squeeze", + torch_opinfo_name="squeeze", + torch_opinfo_variant_name="multiple", + ), + PythonRefInfo( + "_refs.tensor_split", + torch_opinfo_name="tensor_split", + skips=( + # RuntimeError: no _refs support for torch.Tensor.tolist + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref'), + ), + ), + PythonRefInfo( + "_refs.hsplit", + torch_opinfo_name="hsplit", + ), + PythonRefInfo( + "_refs.vsplit", + torch_opinfo_name="vsplit", + ), + PythonRefInfo( + "_refs.dot", + torch_opinfo_name="dot", + error_inputs_func=partial(error_inputs_dot_vdot, is_ref=True), + # .conj() does not set ._is_view() correctly in ATen + validate_view_consistency=False, + skips=( + # RuntimeError: no _refs support for torch.Tensor.is_conj + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref', dtypes=[torch.complex64, torch.complex128]), + ), + ), + PythonRefInfo( + "_refs.vdot", + torch_opinfo_name="vdot", + error_inputs_func=partial(error_inputs_dot_vdot, is_ref=True), + # .conj() does not set ._is_view() correctly in ATen + validate_view_consistency=False, + skips=( + # RuntimeError: no _refs support for torch.Tensor.is_conj + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref', dtypes=[torch.complex64, torch.complex128]), + ), + ), + PythonRefInfo( + "_refs.transpose", + torch_opinfo_name="transpose", + ), + PythonRefInfo( + "_refs.t", + torch_opinfo_name="t", + ), + PythonRefInfo( + "_refs.t_copy", + torch_opinfo_name="t_copy", + supports_out=True, + ), + PythonRefInfo( + "_refs.T", + torch_opinfo_name="T", + error_inputs_func=partial(error_inputs_T, has_ndims_error=True), + ), + 
PythonRefInfo( + "_refs.unfold", + torch_opinfo_name="unfold", + ), + PythonRefInfo( + "_refs.unfold_copy", + torch_opinfo_name="unfold_copy", + supports_out=True, + ), + PythonRefInfo( + "_refs.unsqueeze", + torch_opinfo_name="unsqueeze", + ), + PythonRefInfo( + "_refs.unsqueeze_copy", + torch_opinfo_name="unsqueeze_copy", + supports_out=True, + ), + PythonRefInfo( + "_refs.view", + torch_opinfo_name="view", + ), + PythonRefInfo( + "_refs.view_as", + torch_opinfo_name="view_as", + ), + PythonRefInfo( + "_refs.view_copy", + torch_opinfo_name="view_copy", + supports_out=True, + ), + PythonRefInfo( + "_refs.vstack", + torch_opinfo_name="vstack", + skips=( + # https://github.com/pytorch/pytorch/issues/78613 + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'), + ), + ), + PythonRefInfo( + "_refs.unflatten", + torch_opinfo_name="unflatten", + ), + PythonRefInfo( + "_refs.unbind", + torch_opinfo_name="unbind", + ), + # + # Reduction Reference OpInfos + # + ReductionPythonRefInfo( + "_refs.all", + torch_opinfo_name="all", + skips=( + # FIXME: uint8 input returns uint8 instead of bool + DecorateInfo( + unittest.expectedFailure, 'TestReductions', 'test_result_dtype', + dtypes=[torch.uint8]), + ), + ), + ReductionPythonRefInfo( + "_refs.amax", + torch_opinfo_name="amax", + error_inputs_func=partial(error_inputs_aminmax_amax_amin, is_ref=True), + skips=( + # FIXME: reduces all dimensions when dim=[] + DecorateInfo( + unittest.expectedFailure, 'TestReductions', 'test_dim_empty'), + DecorateInfo( + unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'), + ), + ), + ReductionPythonRefInfo( + "_refs.amin", + torch_opinfo_name="amin", + error_inputs_func=partial(error_inputs_aminmax_amax_amin, is_ref=True), + skips=( + # FIXME: reduces all dimensions when dim=[] + DecorateInfo( + unittest.expectedFailure, 'TestReductions', 'test_dim_empty'), + DecorateInfo( + unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'), + ), + 
), + ReductionPythonRefInfo( + "_refs.any", + torch_opinfo_name="any", + skips=( + # FIXME: uint8 input returns uint8 instead of bool + DecorateInfo( + unittest.expectedFailure, 'TestReductions', 'test_result_dtype', + dtypes=[torch.uint8]), + ), + ), + ReductionPythonRefInfo( + "_refs.count_nonzero", + torch_opinfo_name="count_nonzero", + skips=( + # FIXME: count_nonzero does not accept keepdim kwarg + DecorateInfo( + unittest.skip("Skipped!"), 'TestReductions', + 'test_dim_default_keepdim'), + DecorateInfo( + unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none_keepdim'), + DecorateInfo( + unittest.skip("Skipped!"), 'TestReductions', 'test_dim_single_keepdim'), + DecorateInfo( + unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'), + DecorateInfo( + unittest.skip("Skipped!"), 'TestReductions', 'test_dim_multi_keepdim'), + DecorateInfo( + unittest.skip("Skipped!"), 'TestReductions', + 'test_dim_multi_unsorted_keepdim'), + # FIXME: dim=[] reduces all dimensions + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'), + ), + ), + ReductionPythonRefInfo( + "_refs.mean", + torch_opinfo_name="mean", + supports_out=True, + error_inputs_func=partial(error_inputs_mean, is_ref=True), + skips=( + # FIXME: reduces all dimensions when dim=[] + DecorateInfo( + unittest.expectedFailure, 'TestReductions', 'test_dim_empty'), + DecorateInfo( + unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'), + ), + ), + ReductionPythonRefInfo( + "_refs.std", + torch_opinfo_name="std", + supports_out=True, + skips=( + # FIXME: reduces all dimensions when dim=[] + DecorateInfo( + unittest.expectedFailure, 'TestReductions', 'test_dim_empty'), + DecorateInfo( + unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'), + # FIXME: improve precision + DecorateInfo( + unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input', + dtypes=(torch.float16,)), + DecorateInfo( + unittest.skip("Skipped!"), 'TestReductions', 
+ 'test_ref_duplicate_values', + dtypes=(torch.float16,)), + ), + ), + # std_mean and var_mean are not ReductionInfos + PythonRefInfo( + "_refs.std_mean", + torch_opinfo_name="std_mean", + ), + ReductionPythonRefInfo( + "_refs.sum", + torch_opinfo_name="sum", + supports_out=True, + skips=( + # FIXME: doesn't test out behavior properly for this operator + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + # FIXME: mean reduces all dimensions when dim=[] + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'), + DecorateInfo( + unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'), + # FIXME: improve precision + DecorateInfo( + unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input', + dtypes=[torch.float16]), + DecorateInfo( + unittest.skip("Skipped!"), 'TestReductions', + 'test_ref_duplicate_values', + dtypes=[torch.float16]), + DecorateInfo( + unittest.skip("Skipped!"), 'TestOperators', 'test_reduction_all', + dtypes=[torch.float32]), + ), + ), + PythonRefInfo( + "_refs.cumsum", + torch_opinfo_name="cumsum", + supports_out=True, + skips=( + # doesn't test out behavior properly for this operator + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + ), + ), + PythonRefInfo( + "_refs.cumprod", + torch_opinfo_name="cumprod", + supports_out=True, + skips=( + # doesn't test out behavior properly for this operator + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + ), + ), + PythonRefInfo( + "_refs.sum_to_size", + torch_opinfo_name="sum_to_size", + validate_view_consistency=False, + ), + ReductionPythonRefInfo( + "_refs.prod", + torch_opinfo_name="prod", + supports_out=True, + supports_multiple_dims=True, + skips=( + # FIXME: doesn't test out behavior properly for this operator + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + # FIXME: reduces all dimensions when dim=[] + DecorateInfo( + unittest.expectedFailure, 'TestReductions', 'test_dim_empty'), + 
DecorateInfo( + unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'), + # FIXME: improve precision + DecorateInfo( + unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input', + dtypes=[torch.float16, torch.complex64]), + ), + ), + ReductionPythonRefInfo( + "_refs.var", + torch_opinfo_name="var", + supports_out=True, + skips=( + # FIXME: reduces all dimensions when dim=[] + DecorateInfo( + unittest.expectedFailure, 'TestReductions', 'test_dim_empty'), + DecorateInfo( + unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'), + # FIXME: improve precision + DecorateInfo( + unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input'), + ), + ), + PythonRefInfo( + "_refs.var_mean", + torch_opinfo_name="var_mean", + validate_view_consistency=False, + ), + # + # Linear Algebra Operators + # + PythonRefInfo( + "_refs.addr", + torch_opinfo_name="addr", + decorators=( + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref',), + ), + ), + PythonRefInfo( + "_refs.trace", + torch_opinfo_name="trace", + ), + PythonRefInfo( + "_refs.norm", + torch_opinfo_name="norm", + supports_out=True, + # Uses vector_norm inside and vector_norm is affected by + # https://github.com/pytorch/pytorch/issues/77216 + validate_view_consistency=False, + ), + # + # Tensor Creation Reference OpInfos + # + PythonRefInfo( + "_refs.empty", + torch_opinfo_name="empty", + skips=( + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestCommon', + 'test_python_ref'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestCommon', + 'test_python_ref_torch_fallback'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestCommon', + 'test_out'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestCommon', + 'test_out_warning'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestMathBits', + 'test_conj_view'), + 
DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestMathBits', + 'test_neg_conj_view'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestMathBits', + 'test_neg_view'), + # FIXME: shouldn't check empty results + DecorateInfo(unittest.skip("Can't check result for empty"), 'TestCommon', 'test_python_ref_executor'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + ), + ), + PythonRefInfo( + "_refs.empty_like", + torch_opinfo_name="empty_like", + skips=( + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestCommon', + 'test_python_ref'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestCommon', + 'test_python_ref_torch_fallback'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestCommon', + 'test_out'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestCommon', + 'test_out_warning'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestMathBits', + 'test_conj_view'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestMathBits', + 'test_neg_conj_view'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestMathBits', + 'test_neg_view'), + # FIXME: should not compare results of empty_like + DecorateInfo(unittest.skip("Can't check result for empty_like"), 'TestCommon', 'test_python_ref_executor'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + ), + ), + PythonRefInfo( + "_refs.randn", + torch_opinfo_name="randn", + op=lambda *args, **kwargs: wrapper_set_seed(refs.randn, *args, **kwargs), + skips=( + # see https://github.com/pytorch/pytorch/issues/85121 + DecorateInfo(unittest.skip("make_traced() doesn't set seed properly!"), + 'TestCommon', + 'test_python_ref_executor'), + # These tests expect the input to be a tensor or a sequence of tensors + DecorateInfo(unittest.skip("Test 
expects tensor input"), "TestCommon", "test_noncontiguous_samples"), + DecorateInfo(unittest.skip("Test expects tensor input"), 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.skip("Test expects tensor input"), 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.skip("Test expects tensor input"), 'TestMathBits', 'test_neg_conj_view'), + ), + ), + PythonRefInfo( + "_refs.eye", + torch_opinfo_name="eye", + skips=( + # skip these tests since we have non tensor input + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'), + ), + ), + PythonRefInfo( + "_refs.new_empty", + torch_opinfo_name="new_empty", + skips=( + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestCommon', + 'test_python_ref'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestCommon', + 'test_python_ref_torch_fallback'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestCommon', + 'test_out'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestCommon', + 'test_out_warning'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestMathBits', + 'test_conj_view'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestMathBits', + 'test_neg_conj_view'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestMathBits', + 'test_neg_view'), + # FIXME: should not compare results of empty_like + DecorateInfo(unittest.skip("Can't check result for new_empty"), 'TestCommon', 'test_python_ref_executor'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + ), + ), + PythonRefInfo( + "_refs.new_empty_strided", + torch_opinfo_name="new_empty_strided", + skips=( + DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"), 
+ 'TestCommon', + 'test_python_ref'), + DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"), + 'TestCommon', + 'test_python_ref_torch_fallback'), + DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"), + 'TestMathBits', + 'test_conj_view'), + DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"), + 'TestMathBits', + 'test_neg_conj_view'), + DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"), + 'TestMathBits', + 'test_neg_view'), + DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"), + 'TestCommon', + 'test_python_ref_executor'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + + ), + ), + PythonRefInfo( + "_refs.empty_strided", + torch_opinfo_name="empty_strided", + skips=( + DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"), + 'TestCommon', + 'test_python_ref'), + DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"), + 'TestCommon', + 'test_python_ref_torch_fallback'), + DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"), + 'TestMathBits', + 'test_conj_view'), + DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"), + 'TestMathBits', + 'test_neg_conj_view'), + DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"), + 'TestMathBits', + 'test_neg_view'), + DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"), + 'TestCommon', + 'test_python_ref_executor'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + ), + ), + PythonRefInfo( + "_refs.new_full", + torch_opinfo_name="new_full", + ), + PythonRefInfo( + "_refs.new_ones", + torch_opinfo_name="new_ones", + ), + PythonRefInfo( + "_refs.new_zeros", + torch_opinfo_name="new_zeros", + ), + # + # Conditional Reference OpInfos + # + PythonRefInfo( + "_refs.masked_fill", + torch_opinfo_name="masked_fill", + skips=( + 
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'), + ), + ), + PythonRefInfo( + "_refs.where", + torch_opinfo_name="where", + op=lambda self, condition, other: refs.where(condition, self, other), + supports_out=False, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors', device_type='cuda'), + ), + ), + PythonRefInfo( + "_refs.index_select", + torch_opinfo_name="index_select", + # empty_strided + skips=( + # no _refs support for Tensor.__setitem__ + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref'), + # Sample out= with a stride of zero. This _out operation checks that the input has no + # inner overlap + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'),) + ), + PythonRefInfo( + "_refs.index_copy", + torch_opinfo_name="index_copy", + # empty_strided + skips=( + # no _refs support for Tensor.__setitem__ + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref'), + ), + ), + PythonRefInfo( + "_refs.index_add", + torch_opinfo_name="index_add", + # empty_strided + skips=( + # no _refs support for Tensor.__setitem__ + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref'), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'), + ), + ), + PythonRefInfo( + "_refs.index_fill", + torch_opinfo_name="index_fill", + # empty_strided + skips=( + # no _refs support for Tensor.__setitem__ + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref'),) + ), + # + # Test-related functions + # + PythonRefInfo( + "_refs.allclose", + torch_opinfo_name="allclose", + ), + # + # Misc functions + # + PythonRefInfo( + "_refs.stft", + torch_opinfo_name="stft", + skips=[ + # RuntimeError: no _refs support for aten.pad + DecorateInfo( + unittest.expectedFailure, 'TestCommon', 'test_python_ref' + ), + ], + ), + PythonRefInfo( + "_refs.istft", + torch_opinfo_name="istft", + skips=[ + # RuntimeError: no _refs 
# TODO: review porting these to make_tensor
def index_variable(shape, max_indices, device=torch.device('cpu')):
    """Return a random integer index tensor of the given shape.

    Each element is drawn uniformly from ``[0, max_indices)``. ``shape`` may
    be a single int or a tuple of ints.
    """
    if not isinstance(shape, tuple):
        shape = (shape,)
    # Sample uniform doubles, scale in place, then truncate to long indices.
    index = torch.rand(*shape, dtype=torch.double, device=device).mul_(max_indices).floor_().long()
    return index


def gather_variable(shape, index_dim, max_indices, duplicate=False,
                    device=torch.device('cpu')):
    """Build a 2-D long tensor of indices suitable for ``torch.gather``.

    Every slice along ``index_dim`` holds distinct values sampled from
    ``[0, max_indices)`` via ``randperm`` (requires
    ``max_indices >= shape[1 - index_dim]``). When ``duplicate`` is True the
    first slice along the other dimension is copied from the second,
    deliberately introducing repeated indices.
    """
    assert len(shape) == 2
    assert index_dim < 2
    batch_dim = 1 - index_dim
    index = torch.zeros(*shape, dtype=torch.long, device=device)
    for i in range(shape[index_dim]):
        index.select(index_dim, i).copy_(
            torch.randperm(max_indices, device=device)[:shape[batch_dim]])
    if duplicate:
        index.select(batch_dim, 0).copy_(index.select(batch_dim, 1))
    return index


def bernoulli_scalar():
    """Return a 0-dim bool tensor holding a single Bernoulli(p=0.5) draw."""
    return torch.tensor(0, dtype=torch.bool).bernoulli_()


def mask_not_all_zeros(shape):
    """Return a random bool mask of ``shape`` with at least one True element.

    Resamples until the mask selects something, so callers never receive an
    all-False mask.
    """
    assert len(shape) > 0
    while True:
        result = torch.randn(shape).gt(0)
        if result.sum() > 0:
            return result


# Copied from functorch
def xfail(op_name, variant_name='', *, device_type=None, dtypes=None):
    """Describe an expected-failure entry for :func:`skipOps`."""
    return (op_name, variant_name, device_type, dtypes, True)


def skip(op_name, variant_name='', *, device_type=None, dtypes=None):
    """Describe a skip entry for :func:`skipOps`."""
    return (op_name, variant_name, device_type, dtypes, False)


def skipOps(test_case_name, base_test_name, to_skip):
    """Attach skip/xfail decorators from ``to_skip`` onto matching OpInfos.

    ``to_skip`` is an iterable of tuples produced by :func:`xfail` /
    :func:`skip`. Mutates the ``decorators`` of every matching OpInfo in
    ``op_db`` and returns a no-op decorator, so it can be applied to a test
    function purely for this side effect.
    """
    all_opinfos = op_db
    # NOTE: renamed from `xfail` — the original loop variable shadowed the
    # xfail() helper defined above.
    for xfail_entry in to_skip:
        op_name, variant_name, device_type, dtypes, expected_failure = xfail_entry
        matching_opinfos = [o for o in all_opinfos
                            if o.name == op_name and o.variant_test_name == variant_name]
        assert len(matching_opinfos) >= 1, f"Couldn't find OpInfo for {xfail_entry}"
        for op in matching_opinfos:
            # Pick the marker once instead of duplicating the DecorateInfo
            # construction in both branches.
            marker = (unittest.expectedFailure if expected_failure
                      else unittest.skip("Skipped!"))
            decorator = DecorateInfo(marker, test_case_name, base_test_name,
                                     device_type=device_type, dtypes=dtypes)
            op.decorators = (*op.decorators, decorator)

    # This decorator doesn't modify fn in any way
    def wrapped(fn):
        return fn
    return wrapped
b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/common_modules.py new file mode 100644 index 0000000000000000000000000000000000000000..00251ca264f8408c23cdcc5098c915f33b2bdc74 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/common_modules.py @@ -0,0 +1,4441 @@ +# mypy: ignore-errors + +import torch +import unittest +from copy import deepcopy +from enum import Enum +from functools import wraps, partial +from itertools import chain, product +import itertools +import math +import torch.nn.functional as F +from torch.nn.utils.rnn import pack_padded_sequence +from torch.testing import make_tensor +from torch.testing._internal.common_cuda import TEST_CUDNN +from torch.testing._internal.common_dtype import ( + floating_types, floating_and_complex_types_and, get_all_fp_dtypes) +from torch.testing._internal.common_device_type import ( + _TestParametrizer, _update_param_kwargs, expectedFailureMPS, toleranceOverride, tol, + skipCUDAIfCudnnVersionLessThan, skipCUDAIfRocm, precisionOverride, skipMeta, skipMPS, + skipCUDAVersionIn) +from torch.testing._internal.common_methods_invocations import DecorateInfo +from torch.testing._internal.common_nn import ( + cosineembeddingloss_reference, cross_entropy_loss_reference, ctcloss_reference, + hingeembeddingloss_reference, huberloss_reference, kldivloss_reference, + marginrankingloss_reference, multimarginloss_reference, multilabelmarginloss_reference, + nllloss_reference, nlllossNd_reference, smoothl1loss_reference, softmarginloss_reference, get_reduction) +from torch.testing._internal.common_utils import ( + freeze_rng_state, skipIfMps, GRADCHECK_NONDET_TOL, TEST_WITH_ROCM, IS_WINDOWS, + skipIfTorchDynamo) +from types import ModuleType +from typing import List, Tuple, Type, Set, Dict +import operator + +# List of all namespaces containing modules to test. 
# List of all namespaces containing modules to test.
MODULE_NAMESPACES: List[ModuleType] = [
    torch.nn.modules,
    torch.ao.nn.qat.modules,
    torch.ao.nn.quantizable.modules,
    # NOTE(fix): this namespace was listed twice, which made every quantized
    # module class appear twice in MODULE_CLASSES (duplicate test generation).
    torch.ao.nn.quantized.modules,
]

# Modules that shouldn't be tested for one reason or another.
MODULES_TO_SKIP: Set[Type] = {
    torch.nn.Module,  # abstract base class
    torch.nn.Container,  # deprecated
    torch.nn.NLLLoss2d,  # deprecated
    # NOTE(fix): this entry was written twice; a set literal dedupes it anyway,
    # so listing it once is equivalent and less confusing.
    torch.ao.nn.quantized.MaxPool2d,  # aliases to nn.MaxPool2d
}

# List of all module classes to test, flattened from every namespace's __all__
# and filtered by the skip set above.
MODULE_CLASSES: List[Type] = list(chain.from_iterable(
    [getattr(namespace, module_name) for module_name in namespace.__all__]  # type: ignore[attr-defined]
    for namespace in MODULE_NAMESPACES))
MODULE_CLASSES = [cls for cls in MODULE_CLASSES if cls not in MODULES_TO_SKIP]

# Dict of module class -> common name. Useful for making test names more intuitive.
# Example: torch.nn.modules.linear.Linear -> "nn.Linear"
MODULE_CLASS_NAMES: Dict[Type, str] = {}
for namespace in MODULE_NAMESPACES:
    for module_name in namespace.__all__:  # type: ignore[attr-defined]
        module_cls = getattr(namespace, module_name)
        namespace_name = namespace.__name__.replace('torch.', '').replace('.modules', '')

        # Deal with any aliases by preferring earlier names.
        if module_cls not in MODULE_CLASS_NAMES:
            MODULE_CLASS_NAMES[module_cls] = f'{namespace_name}.{module_name}'


# Specifies the modes (i.e. train, eval) to test over.
TrainEvalMode = Enum('TrainEvalMode', ('train_only', 'eval_only', 'train_and_eval'))


class modules(_TestParametrizer):
    """ PROTOTYPE: Decorator for specifying a list of modules over which to run a test.

    For each ModuleInfo in ``module_info_iterable`` a test variant is generated per
    supported dtype (optionally intersected with ``allowed_dtypes``) and per training
    flag selected by ``train_eval_mode``. Must be used together with
    instantiate_device_type_tests() so a device class is available.
    """

    def __init__(self, module_info_iterable, allowed_dtypes=None,
                 train_eval_mode=TrainEvalMode.train_and_eval, skip_if_dynamo=True):
        self.module_info_list = list(module_info_iterable)
        self.allowed_dtypes = set(allowed_dtypes) if allowed_dtypes is not None else None
        self.train_eval_mode = train_eval_mode
        self.skip_if_dynamo = skip_if_dynamo

    def _get_training_flags(self, module_info):
        """Return the list of training-mode flags (True=train, False=eval) to parametrize over."""
        training_flags = []
        if (self.train_eval_mode == TrainEvalMode.train_only or
                self.train_eval_mode == TrainEvalMode.train_and_eval):
            training_flags.append(True)

        if (self.train_eval_mode == TrainEvalMode.eval_only or
                self.train_eval_mode == TrainEvalMode.train_and_eval):
            training_flags.append(False)

        # If train and eval modes don't differ for the module, don't bother using more than one.
        if not module_info.train_and_eval_differ:
            training_flags = training_flags[:1]

        return training_flags

    def _parametrize_test(self, test, generic_cls, device_cls):
        """Yield (test_fn, name, param_kwargs, decorator_fn) tuples, one per variant."""
        if device_cls is None:
            raise RuntimeError('The @modules decorator is only intended to be used in a device-specific '
                               'context; use it with instantiate_device_type_tests() instead of '
                               'instantiate_parametrized_tests()')

        for module_info in self.module_info_list:
            dtypes = set(module_info.supported_dtypes(device_cls.device_type))
            if self.allowed_dtypes is not None:
                dtypes = dtypes.intersection(self.allowed_dtypes)

            training_flags = self._get_training_flags(module_info)
            for (training, dtype) in product(training_flags, dtypes):
                # Construct the test name; device / dtype parts are handled outside.
                # See [Note: device and dtype suffix placement]
                test_name = module_info.formatted_name
                if len(training_flags) > 1:
                    test_name += f"_{'train_mode' if training else 'eval_mode'}"

                # Construct parameter kwargs to pass to the test.
                param_kwargs = {'module_info': module_info}
                _update_param_kwargs(param_kwargs, 'dtype', dtype)
                _update_param_kwargs(param_kwargs, 'training', training)

                try:

                    @wraps(test)
                    def test_wrapper(*args, **kwargs):
                        return test(*args, **kwargs)

                    if self.skip_if_dynamo and not torch.testing._internal.common_utils.TEST_WITH_TORCHINDUCTOR:
                        test_wrapper = skipIfTorchDynamo("Policy: we don't run ModuleInfo tests w/ Dynamo")(test_wrapper)

                    decorator_fn = partial(module_info.get_decorators, generic_cls.__name__,
                                           test.__name__, device_cls.device_type, dtype)

                    yield (test_wrapper, test_name, param_kwargs, decorator_fn)
                except Exception as ex:
                    # Provides an error message for debugging before rethrowing the exception
                    print(f"Failed to instantiate {test_name} for module {module_info.name}!")
                    raise ex


def get_module_common_name(module_cls):
    """Return the human-readable name for a module class (e.g. "nn.Linear")."""
    if module_cls in MODULE_CLASS_NAMES:
        # Example: "nn.Linear"
        return MODULE_CLASS_NAMES[module_cls]
    else:
        return module_cls.__name__


class FunctionInput:
    """ Contains args and kwargs to pass as input to a function. """
    __slots__ = ['args', 'kwargs']

    def __init__(self, *args, **kwargs):
        self.args = args
        self.kwargs = kwargs


class ModuleInput:
    """ Contains args / kwargs for module instantiation + forward pass. """
    __slots__ = ['constructor_input', 'forward_input', 'desc', 'reference_fn']

    def __init__(self, constructor_input, forward_input=None, desc='', reference_fn=None):
        self.constructor_input = constructor_input  # Inputs to pass during construction
        self.forward_input = forward_input  # Inputs to pass to forward()
        self.desc = desc  # Description for this set of inputs
        self.reference_fn = reference_fn  # Reference with signature: reference_fn(module, parameters, *args, **kwargs)

        if reference_fn is not None:

            @wraps(reference_fn)
            def copy_reference_fn(m, *args, **kwargs):
                # Copy inputs to avoid undesired side effects from calling the reference.
                args, kwargs = deepcopy(args), deepcopy(kwargs)

                # Note that module parameters are passed in for convenience.
                return reference_fn(m, list(m.parameters()), *args, **kwargs)

            self.reference_fn = copy_reference_fn


class ModuleErrorEnum(Enum):
    """ Enumerates when error is raised when testing modules. """
    CONSTRUCTION_ERROR = 0
    FORWARD_ERROR = 1


class ErrorModuleInput:
    """
    A ModuleInput that will cause the operation to throw an error plus information
    about the resulting error.
    """

    __slots__ = ["module_error_input", "error_on", "error_type", "error_regex"]

    def __init__(self,
                 module_error_input,
                 *,
                 error_on=ModuleErrorEnum.CONSTRUCTION_ERROR,
                 error_type=RuntimeError,
                 error_regex):
        self.module_error_input = module_error_input
        self.error_on = error_on
        self.error_type = error_type
        self.error_regex = error_regex


class ModuleInfo:
    """ Module information to be used in testing. """

    def __init__(self,
                 module_cls,  # Class object for the module under test
                 *,
                 module_inputs_func,  # Function to generate module inputs
                 skips=(),  # Indicates which tests to skip
                 decorators=None,  # Additional decorators to apply to generated tests
                 dtypes=floating_types(),  # dtypes this function is expected to work with
                 dtypesIfMPS=(torch.float16, torch.float32,),  # dtypes this function is expected to work with on MPS
                 dtypesIfHpu=(torch.bfloat16, torch.float32,),
                 supports_gradgrad=True,  # whether the op supports second order gradients
                 gradcheck_nondet_tol=0.0,  # tolerance for nondeterminism while performing gradcheck
                 module_memformat_affects_out=False,  # whether converting module to channels last will generate
                                                      # channels last output
                 train_and_eval_differ=False,  # whether the module has differing behavior between train and eval
                 module_error_inputs_func=None,  # Function to generate module inputs that error
                 ):
        self.module_cls = module_cls
        self.module_inputs_func = module_inputs_func
        # Skips are folded into the decorator list; both are DecorateInfo-style entries.
        self.decorators = (*(decorators if decorators else []), *(skips if skips else []))
        self.dtypes = dtypes
        self.dtypesIfMPS = dtypesIfMPS
        self.dtypesIfHpu = dtypesIfHpu
        self.supports_gradgrad = supports_gradgrad
        self.gradcheck_nondet_tol = gradcheck_nondet_tol
        self.module_memformat_affects_out = module_memformat_affects_out
        self.train_and_eval_differ = train_and_eval_differ
        self.module_error_inputs_func = module_error_inputs_func
        self.is_lazy = issubclass(module_cls, torch.nn.modules.lazy.LazyModuleMixin)

    def get_decorators(self, test_class, test_name, device, dtype, param_kwargs):
        """Collect the decorators active for a particular generated test variant."""
        result = []
        for decorator in self.decorators:
            if isinstance(decorator, DecorateInfo):
                if decorator.is_active(test_class, test_name, device, dtype, param_kwargs):
                    result.extend(decorator.decorators)
            else:
                result.append(decorator)
        return result

    def supported_dtypes(self, device_type):
        """Return the dtype tuple appropriate for the given device type."""
        if device_type == 'mps':
            return self.dtypesIfMPS
        elif device_type == 'hpu':
            return self.dtypesIfHpu
        else:
            return self.dtypes

    @property
    def name(self):
        return get_module_common_name(self.module_cls)

    @property
    def formatted_name(self):
        # Dots aren't valid in test names; e.g. "nn.Linear" -> "nn_Linear".
        return self.name.replace('.', '_')

# Start of module inputs functions.

def module_inputs_torch_nn_Linear(module_info, device, dtype, requires_grad, training, **kwargs):
    """Generate ModuleInputs for nn.Linear (with/without bias, batched and unbatched)."""
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    module_inputs = [
        ModuleInput(constructor_input=FunctionInput(10, 8),
                    forward_input=FunctionInput(input=make_input((4, 10))),
                    reference_fn=lambda m, p, input: torch.mm(input, p[0].t()) + p[1].view(1, -1).expand(4, 8)),
        ModuleInput(constructor_input=FunctionInput(10, 8, bias=False),
                    forward_input=FunctionInput(make_input((4, 10))),
                    desc='no_bias',
                    reference_fn=lambda m, p, i: torch.mm(i, p[0].t())),
        ModuleInput(constructor_input=FunctionInput(3, 5),
                    forward_input=FunctionInput(make_input(3)),
                    desc='no_batch_dim',
                    reference_fn=lambda m, p, i: torch.mm(i.view(1, -1), p[0].t()).view(-1) + p[1])
    ]

    return module_inputs


def module_inputs_torch_nn_Bilinear(module_info, device, dtype, requires_grad, training, **kwargs):
    """Generate ModuleInputs for nn.Bilinear (with/without bias, batched and unbatched)."""
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    def bilinear_reference_fn(m, p, x1, x2, bias=True):
        result = torch.einsum('bn,anm,bm->ba', x1, p[0], x2)
        if bias:
            if x1.shape[0] == 1:
                result = result.view(-1) + p[1]
            else:
                result = result + p[1].view(1, -1).expand(x1.shape[0], p[0].shape[0])
        return result

    module_inputs = [
        ModuleInput(constructor_input=FunctionInput(2, 3, 4),
                    forward_input=FunctionInput(make_input((8, 2)), make_input((8, 3))),
                    reference_fn=bilinear_reference_fn),
        ModuleInput(constructor_input=FunctionInput(2, 3, 4, bias=False),
                    forward_input=FunctionInput(make_input((8, 2)), make_input((8, 3))),
                    desc='no_bias',
                    reference_fn=lambda m, p, x1, x2: bilinear_reference_fn(m, p, x1, x2, bias=False)),
        ModuleInput(constructor_input=FunctionInput(2, 3, 4),
                    forward_input=FunctionInput(make_input(2), make_input(3)),
                    desc='no_batch_dim',
                    reference_fn=lambda m, p, x1, x2: bilinear_reference_fn(m, p, x1.view(1, -1), x2.view(1, -1))),
    ]

    return module_inputs


def module_inputs_torch_nn_KLDivLoss(module_info, device, dtype, requires_grad, training, **kwargs):
    """Generate ModuleInputs for nn.KLDivLoss across reduction modes and log_target."""
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    cases: List[Tuple[str, dict]] = [
        ('', {}),
        ('reduction_sum', {'reduction': 'sum'}),
        ('reduction_batchmean', {'reduction': 'batchmean'}),
        ('reduction_none', {'reduction': 'none'}),
        ('log_target', {'log_target': True})
    ]

    module_inputs = []
    for desc, constructor_kwargs in cases:
        def reference_fn(m, p, i, t, constructor_kwargs=constructor_kwargs):
            return kldivloss_reference(i, t, **constructor_kwargs)

        # fix: consult the per-case constructor_kwargs rather than the outer
        # **kwargs (which never carries 'log_target'); previously the
        # log_target case still got a .log()-transformed target.
        log_target = constructor_kwargs.get('log_target', False)

        input = make_input((10, 10)).log()
        target = make_input((10, 10)) if log_target else make_input((10, 10)).log()
        module_inputs.append(
            ModuleInput(constructor_input=FunctionInput(**constructor_kwargs),
                        forward_input=FunctionInput(input, target),
                        desc=desc,
                        reference_fn=reference_fn)
        )

        scalar_input = make_input(()).log()
        scalar_target = make_input(()) if log_target else make_input(()).log()
        module_inputs.append(
            ModuleInput(constructor_input=FunctionInput(**constructor_kwargs),
                        # fix: was FunctionInput(scalar_input, scalar_input);
                        # the computed scalar_target was never used.
                        forward_input=FunctionInput(scalar_input, scalar_target),
                        desc='scalar_' + desc,
                        reference_fn=reference_fn)
        )

    return module_inputs


def module_inputs_torch_nn_NLLLoss(module_info, device, dtype, requires_grad, training, **kwargs):
    """Generate ModuleInputs for nn.NLLLoss: 2d, 3d, nd and higher-dim targets, weights, ignore_index."""
    def make_input(shape, device=device, dtype=dtype, requires_grad=requires_grad):
        # NLLLoss expects log-probabilities along dim 1.
        return make_tensor(shape, device=device, dtype=dtype,
                           requires_grad=False).log_softmax(dim=1).requires_grad_(requires_grad)
    make_weight = partial(make_tensor, device=device, dtype=dtype, requires_grad=False)

    cases: List[Tuple[str, dict]] = [
        ('', {}),
        ('reduction_sum', {'reduction': 'sum'}),
        ('reduction_none', {'reduction': 'none'}),
        ('ignore_index', {'ignore_index': 2}),
        ('weights', {'weight': make_weight(4).abs()}),
        ('weights_ignore_index', {'weight': make_weight(4).abs(), 'ignore_index': 2}),
        ('weights_ignore_index_neg', {'weight': make_weight(4).abs(), 'ignore_index': -1})
    ]

    # TODO: Uncomment when negative weights is supported.
    # negative_weight = make_weight(10)
    # negative_weight[0] = -1
    # cases.append(('weights_negative', {'weight': negative_weight}))
    module_inputs = []
    for desc, constructor_kwargs in cases:

        def reference_fn(m, p, i, t, constructor_kwargs=constructor_kwargs):
            return nllloss_reference(i, t, **constructor_kwargs)

        module_inputs.append(
            ModuleInput(constructor_input=FunctionInput(**constructor_kwargs),
                        forward_input=FunctionInput(make_input((15, 4)),
                                                    torch.empty(15, device=device).uniform_().mul(4).floor().long()),
                        desc=desc,
                        reference_fn=reference_fn)
        )

        def nd_reference_fn(m, p, i, t, constructor_kwargs=constructor_kwargs):
            return nlllossNd_reference(i, t, **constructor_kwargs)

        module_inputs.append(
            ModuleInput(constructor_input=FunctionInput(**constructor_kwargs),
                        forward_input=FunctionInput(
                            make_input((2, 4, 5, 5)),
                            torch.empty(2, 5, 5, device=device).uniform_().mul(4).floor().long()),
                        desc=f"nd_{desc}",
                        reference_fn=nd_reference_fn)
        )

        module_inputs.append(
            ModuleInput(constructor_input=FunctionInput(**constructor_kwargs),
                        forward_input=FunctionInput(
                            make_input((2, 4, 5, 5, 2, 2)),
                            torch.empty(2, 5, 5, 2, 2, device=device).uniform_().mul(4).floor().long()),
                        desc=f"higher_dim_{desc}",
                        reference_fn=nd_reference_fn)
        )

        module_inputs.append(
            ModuleInput(constructor_input=FunctionInput(**constructor_kwargs),
                        forward_input=FunctionInput(
                            make_input((2, 4, 5)),
                            torch.empty(2, 5, device=device).uniform_().mul(4).floor().long()),
                        desc=f"3d_{desc}",
                        reference_fn=nd_reference_fn)
        )

    return module_inputs


def module_inputs_torch_nn_GaussianNLLLoss(module_info, device, dtype, requires_grad, training, **kwargs):
    """Generate ModuleInputs for nn.GaussianNLLLoss across reduction modes (no-batch-dim inputs)."""
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    make_target = partial(make_tensor, device=device, dtype=dtype, requires_grad=False)

    cases: List[Tuple[str, dict]] = [
        ('', {}),
        ('reduction_sum', {'reduction': 'sum'}),
        ('reduction_mean', {'reduction': 'mean'}),
        ('reduction_none', {'reduction': 'none'}),
    ]

    module_inputs = []
    for desc, constructor_kwargs in cases:
        module_inputs.append(
            ModuleInput(constructor_input=FunctionInput(**constructor_kwargs),
                        forward_input=FunctionInput(make_input(3),
                                                    make_target(3),
                                                    make_input(1).abs()),  # var must be non-negative
                        desc=desc,
                        reference_fn=no_batch_dim_reference_fn)
        )

    return module_inputs


def module_inputs_torch_nn_PoissonNLLLoss(module_info, device, dtype, requires_grad, training, **kwargs):
    """Generate ModuleInputs for nn.PoissonNLLLoss across reduction/full/log_input combinations."""
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    make_target = partial(make_tensor, device=device, dtype=dtype, requires_grad=False)

    cases: List[Tuple[str, dict]] = [
        ('', {}),
        ('reduction_sum', {'reduction': 'sum'}),
        ('reduction_mean', {'reduction': 'mean'}),
        ('reduction_none', {'reduction': 'none'}),
        ('full', {'full': True}),
        ('no_log_input', {'log_input': False}),
        ('full_no_log_input', {'full': True, 'log_input': False}),
    ]

    def poissonnllloss_reference_fn(i, t, log_input=True, full=False, reduction='mean', eps=1e-8):
        if log_input:
            result = i.exp() - t.mul(i)
        else:
            result = i - t.mul((i + eps).log())

        # Stirling approximation term, zeroed for targets <= 1.
        if full:
            result += (t.mul(t.log()) - t + 0.5 * (2. * math.pi * t).log()).masked_fill(t <= 1, 0)

        if reduction == 'none':
            return result
        elif reduction == 'mean':
            return result.sum() / i.numel()
        else:
            return result.sum()

    module_inputs = []
    for desc, constructor_kwargs in cases:
        def reference_fn(m, p, i, t, constructor_kwargs=constructor_kwargs):
            return poissonnllloss_reference_fn(i, t, **constructor_kwargs)

        # When log_input=False the input itself must be strictly positive.
        log_input = constructor_kwargs.get('log_input', True)
        input = make_input((2, 3, 4, 5)) if log_input else make_input((2, 3, 4, 5)).abs().add(0.001)
        module_inputs.append(
            ModuleInput(constructor_input=FunctionInput(**constructor_kwargs),
                        forward_input=FunctionInput(input,
                                                    make_target((2, 3, 4, 5)).floor_().abs_()),
                        desc=desc,
                        reference_fn=reference_fn)
        )

    return module_inputs


def module_inputs_torch_nn_MSELoss(module_info, device, dtype, requires_grad, training, **kwargs):
    """Generate ModuleInputs for nn.MSELoss across reduction modes, including scalar inputs."""
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    make_target = partial(make_tensor, device=device, dtype=dtype, requires_grad=False)

    cases: List[Tuple[str, dict]] = [
        ('', {}),
        ('reduction_sum', {'reduction': 'sum'}),
        ('reduction_mean', {'reduction': 'mean'}),
        ('reduction_none', {'reduction': 'none'}),
    ]

    def mse_loss_reference_fn(m, p, i, t, reduction='mean'):
        if reduction == 'none':
            return (i - t).pow(2)
        elif reduction == 'mean':
            return (i - t).pow(2).sum() / i.numel()
        else:
            return (i - t).pow(2).sum()

    module_inputs = []
    for desc, constructor_kwargs in cases:
        module_inputs.append(
            ModuleInput(constructor_input=FunctionInput(**constructor_kwargs),
                        forward_input=FunctionInput(make_input((2, 3, 4, 5)),
                                                    make_target((2, 3, 4, 5))),
                        desc=desc,
                        reference_fn=partial(mse_loss_reference_fn, **constructor_kwargs))
        )
        module_inputs.append(
            ModuleInput(constructor_input=FunctionInput(**constructor_kwargs),
                        forward_input=FunctionInput(make_input(()),
                                                    make_target(())),
                        desc=f'{desc}_scalar',
                        reference_fn=partial(mse_loss_reference_fn, **constructor_kwargs))
        )

    return module_inputs


def no_batch_dim_reference_fn(m, p, *args, **kwargs):
    """Reference function for modules supporting no batch dimensions.

    Unbatched inputs are unsqueezed to form a
    single batch input before passing them to the module.
    The output is squeezed to compare with the
    output of unbatched input to the module.

    Currently it only supports modules which return a single Tensor as output.
    You can bind the following kwargs.
    Kwargs:
        batch_first[bool] : If True, all the Tensors in `args` while be unsqueezed at dim `0` .
                            and output will be squeezed at dim `0` else dim `1` for both.
        kwargs_to_batchify[dict] : Dictionary specifying the name of the argument and dimension to unsqueeze.
                                   Useful if there are few arguments whose batch dimension are different
                                   from the ones selected by `batch_first`.
        is_criterion[bool] : Specify if the module is a criterion and handle the reduction for output accordingly.
    """
    def get_and_pop(key, default):
        v = kwargs.get(key, default)
        if key in kwargs:
            kwargs.pop(key)
        return v

    batch_dim = 0 if get_and_pop('batch_first', True) else 1
    kwargs_to_batchify = get_and_pop('kwargs_to_batchify', None)
    is_criterion = get_and_pop('is_criterion', False)

    if kwargs_to_batchify is not None:
        assert isinstance(kwargs_to_batchify, dict)
        for k, v in kwargs.items():
            if k in kwargs_to_batchify and v is not None:
                bdim = kwargs_to_batchify[k]
                kwargs[k] = v.unsqueeze(bdim)

    single_batch_input_args = [input.unsqueeze(batch_dim) for input in args]
    with freeze_rng_state():
        output = m(*single_batch_input_args, **kwargs).squeeze(batch_dim)

    if is_criterion:
        reduction = get_reduction(m)
        if reduction == 'none':
            return output.squeeze(0)
    return output


def no_batch_dim_reference_mha(m, p, *args, **kwargs):
    """Reference function for MultiheadAttention supporting no batch dimensions.

    Unbatched inputs are unsqueezed to form a
    single batch input before passing them to the module.
    The output is squeezed to compare with the
    output of unbatched input to the module.
    """
    batch_dim = 0 if kwargs.get('batch_first', True) else 1
    if 'batch_first' in kwargs:
        kwargs.pop('batch_first')
    if 'key_padding_mask' in kwargs and kwargs['key_padding_mask'] is not None:
        kwargs['key_padding_mask'] = kwargs['key_padding_mask'].unsqueeze(0)
    single_batch_input_args = [input.unsqueeze(batch_dim) for input in args]
    with freeze_rng_state():
        output = m(*single_batch_input_args, **kwargs)
        # (attn_output, attn_weights); weights always have the batch at dim 0.
        return (output[0].squeeze(batch_dim), output[1].squeeze(0))


def no_batch_dim_reference_rnn_gru(m, p, *args, **kwargs):
    """Reference function for RNN and GRU supporting no batch dimensions.

    Unbatched inputs are unsqueezed to form a
    single batch input before passing them to the module.
    The output is squeezed to compare with the
    output of unbatched input to the module.
    """
    if len(args) == 1:
        inp, = args
        h = None
    elif len(args) == 2:
        inp, h = args
        h = h.unsqueeze(1)  # hidden state batch dim is always 1

    batch_dim = 0 if kwargs['batch_first'] else 1
    kwargs.pop('batch_first')
    inp = inp.unsqueeze(batch_dim)
    single_batch_input_args = (inp, h)
    with freeze_rng_state():
        output = m(*single_batch_input_args, **kwargs)
        return (output[0].squeeze(batch_dim), output[1].squeeze(1))


def no_batch_dim_reference_lstm(m, p, *args, **kwargs):
    """Reference function for LSTM supporting no batch dimensions.

    Unbatched inputs are unsqueezed to form a
    single batch input before passing them to the module.
    The output is squeezed to compare with the
    output of unbatched input to the module.
    """
    if len(args) == 1:
        inp, = args
        h = None
    elif len(args) == 2:
        inp, h = args
        h = (h[0].unsqueeze(1), h[1].unsqueeze(1))  # (h_0, c_0); batch dim is always 1

    batch_dim = 0 if kwargs['batch_first'] else 1
    kwargs.pop('batch_first')
    inp = inp.unsqueeze(batch_dim)
    single_batch_input_args = (inp, h)
    with freeze_rng_state():
        output = m(*single_batch_input_args, **kwargs)
        return (output[0].squeeze(batch_dim), (output[1][0].squeeze(1), output[1][1].squeeze(1)))


def no_batch_dim_reference_lstmcell(m, p, *args, **kwargs):
    """Reference function for LSTMCell supporting no batch dimensions.

    The module is passed the input and target in batched form with a single item.
    The output is squeezed to compare with the no-batch input.
    """
    inp, (h, c) = args
    single_batch_input_args = (inp.unsqueeze(0), (h.unsqueeze(0), c.unsqueeze(0)))
    with freeze_rng_state():
        output = m(*single_batch_input_args, **kwargs)
        return (output[0].squeeze(0), output[1].squeeze(0))


def generate_regression_criterion_inputs(make_input):
    """Generate no-batch-dim ModuleInputs for a regression criterion, one per reduction mode."""
    return [
        ModuleInput(
            constructor_input=FunctionInput(reduction=reduction),
            forward_input=FunctionInput(make_input((4, )), make_input(4,)),
            reference_fn=partial(no_batch_dim_reference_fn, is_criterion=True),
            desc=f'no_batch_dim_{reduction}'
        ) for reduction in ['none', 'mean', 'sum']]


def module_inputs_torch_nn_AvgPool1d(module_info, device, dtype, requires_grad, training, **kwargs):
    """Generate ModuleInputs for nn.AvgPool1d."""
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    return [
        ModuleInput(constructor_input=FunctionInput(kernel_size=2),
                    forward_input=FunctionInput(make_input((3, 6))),
                    desc='no_batch_dim',
                    reference_fn=no_batch_dim_reference_fn),
        ModuleInput(constructor_input=FunctionInput(2),
                    forward_input=FunctionInput(make_input((2, 3, 6)))),
        ModuleInput(constructor_input=FunctionInput((2,), (2,)),
                    forward_input=FunctionInput(make_input((2, 3, 6))),
                    desc='stride'),
        ModuleInput(constructor_input=FunctionInput(2, 2, 1),
                    forward_input=FunctionInput(make_input((2, 3, 6))),
                    desc='stride_pad')]


def module_inputs_torch_nn_AvgPool2d(module_info, device, dtype, requires_grad, training, **kwargs):
    """Generate ModuleInputs for nn.AvgPool2d, including divisor_override variants."""
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    return [
        ModuleInput(constructor_input=FunctionInput((2, 2)),
                    forward_input=FunctionInput(make_input((3, 6, 6))),
                    desc='no_batch_dim',
                    reference_fn=no_batch_dim_reference_fn),
        ModuleInput(constructor_input=FunctionInput((2, 2)),
                    forward_input=FunctionInput(make_input((2, 3, 6, 6)))),
        ModuleInput(constructor_input=FunctionInput((2, 2), (2, 2)),
                    forward_input=FunctionInput(make_input((2, 3, 6, 6))),
                    desc='stride'),
        ModuleInput(constructor_input=FunctionInput((2, 2), (2, 2), (1, 1)),
                    forward_input=FunctionInput(make_input((2, 3, 6, 6))),
                    desc='stride_pad'),
        ModuleInput(constructor_input=FunctionInput((2, 2), divisor_override=1),
                    forward_input=FunctionInput(make_input((2, 3, 6, 6))),
                    desc='divisor'),
        ModuleInput(constructor_input=FunctionInput((2, 2), (2, 2), divisor_override=1),
                    forward_input=FunctionInput(make_input((2, 3, 6, 6))),
                    desc='divisor_stride'),
        ModuleInput(constructor_input=FunctionInput((2, 2), (2, 2), (1, 1), divisor_override=1),
                    forward_input=FunctionInput(make_input((2, 3, 6, 6))),
                    desc='divisor_stride_pad')]


def module_inputs_torch_nn_AvgPool3d(module_info, device, dtype, requires_grad, training, **kwargs):
    """Generate ModuleInputs for nn.AvgPool3d, including GPU-kernel-path and divisor variants."""
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    return [
        ModuleInput(constructor_input=FunctionInput((2, 2, 2)),
                    forward_input=FunctionInput(make_input((3, 4, 4, 4))),
                    desc='no_batch_dim',
                    reference_fn=no_batch_dim_reference_fn),
        ModuleInput(constructor_input=FunctionInput((2, 2, 2)),
                    forward_input=FunctionInput(make_input((2, 3, 4, 4, 4)))),
        ModuleInput(constructor_input=FunctionInput(2, (2, 2, 2)),
                    forward_input=FunctionInput(make_input((2, 3, 5, 5, 5))),
                    desc='stride'),
        ModuleInput(constructor_input=FunctionInput(2, 2, (1, 1, 1)),
                    forward_input=FunctionInput(make_input((2, 3, 5, 5, 5))),
                    desc='stride_pad'),
        ModuleInput(constructor_input=FunctionInput(4, 2, (1, 2, 1)),
                    forward_input=FunctionInput(make_input((2, 3, 5, 5, 5))),
                    desc='stride_pad_gpu_fixedkw_output'),
        ModuleInput(constructor_input=FunctionInput((2, 4, 8), 1, (1, 1, 2)),
                    forward_input=FunctionInput(make_input((2, 3, 2, 4, 8))),
                    desc='stride_pad_gpu_general_output'),
        ModuleInput(constructor_input=FunctionInput(3, 1, 0),
                    forward_input=FunctionInput(make_input((2, 3, 4, 4, 4))),
                    desc='stride1_pad0_gpu_input'),
        ModuleInput(constructor_input=FunctionInput(2, 2, (1, 1, 1)),
                    forward_input=FunctionInput(make_input((2, 3, 4, 4, 4))),
                    desc='stride_pad_gpu_input_nooverlap'),
        ModuleInput(constructor_input=FunctionInput((2, 2, 2), divisor_override=1),
                    forward_input=FunctionInput(make_input((2, 3, 4, 4, 4))),
                    desc='divisor'),
        ModuleInput(constructor_input=FunctionInput(2, (2, 2, 2), divisor_override=1),
                    forward_input=FunctionInput(make_input((2, 3, 5, 5, 5))),
                    desc='divisor_stride'),
        ModuleInput(constructor_input=FunctionInput(2, 2, (1, 1, 1), divisor_override=1),
                    forward_input=FunctionInput(make_input((2, 3, 5, 5, 5))),
                    desc='divisor_stride_pad'),
        ModuleInput(constructor_input=FunctionInput(4, 2, (1, 2, 1), divisor_override=1),
                    forward_input=FunctionInput(make_input((2, 3, 5, 5, 5))),
                    desc='divisor_stride_pad_gpu_fixedkw_output'),
        ModuleInput(constructor_input=FunctionInput((2, 4, 8), 1, (1, 1, 2), divisor_override=1),
                    forward_input=FunctionInput(make_input((2, 3, 2, 4, 8))),
                    desc='divisor_stride_pad_gpu_general_output'),
        ModuleInput(constructor_input=FunctionInput(3, 1, 0, divisor_override=1),
                    forward_input=FunctionInput(make_input((2, 3, 4, 4, 4))),
                    desc='divisor_stride1_pad0_gpu_input'),
        ModuleInput(constructor_input=FunctionInput(2, 2, (1, 1, 1), divisor_override=1),
                    forward_input=FunctionInput(make_input((2, 3, 4, 4, 4))),
                    desc='divisor_stride_pad_gpu_input_nooverlap')]


def module_inputs_torch_nn_AdaptiveAvgPool1d(module_info, device, dtype, requires_grad, training, **kwargs):
    """Generate ModuleInputs for nn.AdaptiveAvgPool1d."""
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    return [
        ModuleInput(constructor_input=FunctionInput(3,),
                    forward_input=FunctionInput(make_input((1, 3, 5))),
                    desc='single'),
        ModuleInput(constructor_input=FunctionInput(3,),
                    forward_input=FunctionInput(make_input((3, 5))),
                    reference_fn=no_batch_dim_reference_fn,
                    desc='no_batch_dim'),
        ModuleInput(constructor_input=FunctionInput(1,),
                    forward_input=FunctionInput(make_input((1, 3, 5))),
                    desc='one_output')]


def module_inputs_torch_nn_AdaptiveAvgPool2d(module_info, device, dtype, requires_grad, training, **kwargs):
    """Generate ModuleInputs for nn.AdaptiveAvgPool2d, including tuple and None target sizes."""
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    return [
        ModuleInput(constructor_input=FunctionInput(3,),
                    forward_input=FunctionInput(make_input((1, 3, 5, 6))),
                    desc='single'),
        ModuleInput(constructor_input=FunctionInput(3,),
                    forward_input=FunctionInput(make_input((3, 5, 6))),
                    reference_fn=no_batch_dim_reference_fn,
                    desc='no_batch_dim'),
        ModuleInput(constructor_input=FunctionInput(1,),
                    forward_input=FunctionInput(make_input((1, 3, 5, 6))),
                    desc='single_1x1output'),
        ModuleInput(constructor_input=FunctionInput((3, 4)),
                    forward_input=FunctionInput(make_input((1, 3, 5, 6))),
                    desc='tuple'),
        ModuleInput(constructor_input=FunctionInput((3, None)),
                    forward_input=FunctionInput(make_input((1, 3, 5, 6))),
                    desc='tuple_none')]


def module_inputs_torch_nn_AdaptiveAvgPool3d(module_info, device, dtype, requires_grad, training, **kwargs):
    """Generate ModuleInputs for nn.AdaptiveAvgPool3d, including tuple and None target sizes."""
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    return [
        ModuleInput(constructor_input=FunctionInput(3,),
                    forward_input=FunctionInput(make_input((2, 3, 5, 2, 7))),
                    desc='single'),
        ModuleInput(constructor_input=FunctionInput(3,),
                    forward_input=FunctionInput(make_input((3, 5, 2, 7))),
                    reference_fn=no_batch_dim_reference_fn,
                    desc='no_batch_dim'),
        ModuleInput(constructor_input=FunctionInput((3, 4, 5)),
                    forward_input=FunctionInput(make_input((2, 3, 5, 3, 7))),
                    desc='tuple'),
        ModuleInput(constructor_input=FunctionInput((None, 4, 5)),
                    forward_input=FunctionInput(make_input((2, 3, 5, 3, 7))),
                    desc='tuple_none'),
        ModuleInput(constructor_input=FunctionInput((3, 2, 2)),
                    forward_input=FunctionInput(make_input((1, 1, 3, 2, 6))),
                    desc='last_dim')]


def module_inputs_torch_nn_AdaptiveMaxPool1d(module_info, device, dtype, requires_grad, training, **kwargs):
    """Generate ModuleInputs for nn.AdaptiveMaxPool1d."""
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    return [
        ModuleInput(constructor_input=FunctionInput(3,),
                    forward_input=FunctionInput(make_input((1, 3, 5))),
                    desc='single'),
        ModuleInput(constructor_input=FunctionInput(3,),
                    forward_input=FunctionInput(make_input((3, 5))),
                    reference_fn=no_batch_dim_reference_fn,
                    desc='no_batch_dim')]


def module_inputs_torch_nn_AdaptiveMaxPool2d(module_info, device, dtype, requires_grad, training, **kwargs):
    """Generate ModuleInputs for nn.AdaptiveMaxPool2d, including tuple and None target sizes."""
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    return [
        ModuleInput(constructor_input=FunctionInput(3,),
                    forward_input=FunctionInput(make_input((1, 3, 5, 6))),
                    desc='single'),
        ModuleInput(constructor_input=FunctionInput(3,),
                    forward_input=FunctionInput(make_input((3, 5, 6))),
                    reference_fn=no_batch_dim_reference_fn,
                    desc='no_batch_dim'),
        ModuleInput(constructor_input=FunctionInput((3, 4)),
                    forward_input=FunctionInput(make_input((1, 3, 5, 6))),
                    desc='tuple'),
        ModuleInput(constructor_input=FunctionInput((3, None)),
                    forward_input=FunctionInput(make_input((1, 3, 5, 6))),
                    desc='tuple_none')]


def module_inputs_torch_nn_AdaptiveMaxPool3d(module_info, device, dtype, requires_grad, training, **kwargs):
    """Generate ModuleInputs for nn.AdaptiveMaxPool3d, including non-atomic (evenly divisible) cases."""
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    return [
        ModuleInput(constructor_input=FunctionInput(3,),
                    forward_input=FunctionInput(make_input((2, 3, 5, 6, 7))),
                    desc='single'),
        ModuleInput(constructor_input=FunctionInput(3,),
                    forward_input=FunctionInput(make_input((3, 5, 6, 7))),
                    reference_fn=no_batch_dim_reference_fn,
                    desc='no_batch_dim'),
        ModuleInput(constructor_input=FunctionInput((3, 4, 5)),
                    forward_input=FunctionInput(make_input((2, 3, 5, 6, 7))),
                    desc='tuple'),
        ModuleInput(constructor_input=FunctionInput((3, None, 5)),
                    forward_input=FunctionInput(make_input((2, 3, 5, 6, 7))),
                    desc='tuple_none'),
        ModuleInput(constructor_input=FunctionInput(3),
                    forward_input=FunctionInput(make_input((2, 3, 12, 9, 3))),
                    desc='single_nonatomic'),
        ModuleInput(constructor_input=FunctionInput((3, 4, 5)),
                    forward_input=FunctionInput(make_input((2, 3, 6, 4, 10))),
                    desc='tuple_nonatomic')]


def module_inputs_torch_nn_BatchNorm1d(module_info, device, dtype, requires_grad, training, **kwargs):
    """Generate ModuleInputs for nn.BatchNorm1d (2d/3d input, affine/tracking variants, zero batch)."""
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    return [
        ModuleInput(constructor_input=FunctionInput(10,),
                    forward_input=FunctionInput(make_input((4, 10))),
                    desc='affine'),
        ModuleInput(constructor_input=FunctionInput(5,),
                    forward_input=FunctionInput(make_input((4, 5, 3))),
                    desc='3d_input'),
        ModuleInput(constructor_input=FunctionInput(10, 1e-3, None),
                    forward_input=FunctionInput(make_input((4, 10))),
                    desc='affine_simple_average'),
        ModuleInput(constructor_input=FunctionInput(10, 1e-3, 0.3, False),
                    forward_input=FunctionInput(make_input((4, 10))),
                    desc='not_affine'),
        ModuleInput(constructor_input=FunctionInput(10, 1e-3, 0.3, True, False),
                    forward_input=FunctionInput(make_input((4, 10))),
                    desc='not_tracking_stats'),
        ModuleInput(constructor_input=FunctionInput(5, 1e-3, 0.3, False),
                    forward_input=FunctionInput(make_input((4, 5, 3))),
                    desc='3d_input_not_affine'),
        ModuleInput(constructor_input=FunctionInput(5, 1e-3, 0.3, False),
                    forward_input=FunctionInput(make_input((0, 5, 9))),
                    desc='zero_batch')]


def module_inputs_torch_nn_BatchNorm2d(module_info, device, dtype, requires_grad, training, **kwargs):
    """Generate ModuleInputs for nn.BatchNorm2d (momentum/affine/tracking variants, zero batch)."""
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    return [
        ModuleInput(constructor_input=FunctionInput(3,),
                    forward_input=FunctionInput(make_input((2, 3, 6, 6)))),
        ModuleInput(constructor_input=FunctionInput(3, 1e-3, None),
                    forward_input=FunctionInput(make_input((2, 3, 6, 6))),
                    desc='2d_simple_average'),
        ModuleInput(constructor_input=FunctionInput(3, 1e-3, 0.8),
                    forward_input=FunctionInput(make_input((2, 3, 6, 6))),
                    desc='momentum'),
        ModuleInput(constructor_input=FunctionInput(3, 1e-3, 0.8, False),
                    forward_input=FunctionInput(make_input((2, 3, 6, 6))),
                    desc='not_affine'),
        ModuleInput(constructor_input=FunctionInput(3, 1e-3, 0.8, True, False),
                    forward_input=FunctionInput(make_input((2, 3, 6, 6))),
                    desc='not_tracking_stats'),
        ModuleInput(constructor_input=FunctionInput(5, 1e-3, 0.3, False),
                    forward_input=FunctionInput(make_input((0, 5, 2, 2))),
                    desc='zero_batch')]


def module_inputs_torch_nn_BatchNorm3d(module_info, device, dtype, requires_grad, training, **kwargs):
    """Generate ModuleInputs for nn.BatchNorm3d (momentum/affine/tracking variants, zero batch)."""
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    return [
        ModuleInput(constructor_input=FunctionInput(3,),
                    forward_input=FunctionInput(make_input((2, 3, 4, 4, 4)))),
        ModuleInput(constructor_input=FunctionInput(3, 1e-3, None),
                    forward_input=FunctionInput(make_input((2, 3, 4, 4, 4))),
                    desc='3d_simple_average'),
        ModuleInput(constructor_input=FunctionInput(3, 1e-3, 0.7),
                    forward_input=FunctionInput(make_input((2, 3, 4, 4, 4))),
                    desc='momentum'),
        ModuleInput(constructor_input=FunctionInput(3, 1e-3, 0.7, False),
                    forward_input=FunctionInput(make_input((2, 3, 4, 4, 4))),
                    desc='not_affine'),
        ModuleInput(constructor_input=FunctionInput(3, 1e-3, 0.7, True, False),
                    forward_input=FunctionInput(make_input((2, 3, 4, 4, 4))),
                    desc='not_tracking_stats'),
        ModuleInput(constructor_input=FunctionInput(5, 1e-3, 0.3, False),
                    forward_input=FunctionInput(make_input((0, 5, 2, 2, 2))),
                    desc='zero_batch')]


def module_inputs_torch_nn_ConvNd(module_info, device, dtype, requires_grad, training, **kwargs):
    """Generate ModuleInputs for ConvNd / ConvTransposeNd / LazyConvNd.

    Required kwargs:
        N (int): spatial dimensionality (1, 2 or 3).
    Optional kwargs:
        lazy (bool): build Lazy* constructor inputs (no in_channels).
        transposed (bool): transposed conv; 'same' padding is skipped for it.
    """
    N = kwargs['N']
    lazy = kwargs.get('lazy', False)
    transposed = kwargs.get('transposed', False)
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    # 'same' padding is not supported by transposed convolutions.
    conv_kwargs_list = [{}] if transposed else [{}, {'padding': 'same'}]
    kernel_size, C_in, C_out = 3, 4, 5
    input_no_batch_shape = (C_in,) + tuple(i + 3 for i in range(N))
    input_batch_shape = (2,) + input_no_batch_shape
    return [
        ModuleInput(constructor_input=(FunctionInput(C_out, kernel_size, **conv_kwargs) if lazy else
                                       FunctionInput(C_in, C_out, kernel_size, **conv_kwargs)),
                    forward_input=FunctionInput(make_input(
                        input_batch_shape if with_batch else input_no_batch_shape)),
                    desc=('' if with_batch else 'no_batch_dim'),
                    reference_fn=(None if with_batch else no_batch_dim_reference_fn))
        for with_batch, conv_kwargs in itertools.product([True, False], conv_kwargs_list)
    ]
desc, constructor_kwargs in cases: + def reference_fn(m, p, i1, i2, t, constructor_kwargs=constructor_kwargs): + return cosineembeddingloss_reference(i1, i2, t, **constructor_kwargs) + + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput(make_input((15, 10)), make_input((15, 10)), + make_target((15,)).sign()), + desc=desc, + reference_fn=reference_fn) + ) + + return module_inputs + + +def module_inputs_torch_nn_ELU(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(alpha=2.), + forward_input=FunctionInput(make_input((3, 2, 5))), + reference_fn=lambda m, p, i: torch.where(i >= 0, i, 2 * (i.exp() - 1))), + ModuleInput(constructor_input=FunctionInput(alpha=2.), + forward_input=FunctionInput(make_input(())), + desc='scalar'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((3,))), + desc='no_batch_dim', + reference_fn=no_batch_dim_reference_fn), + ModuleInput(constructor_input=FunctionInput(alpha=2.), + forward_input=FunctionInput(make_input((2, 3, 2, 5))), + desc='4d_input')] + + +def module_inputs_torch_nn_CELU(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(alpha=2.), + forward_input=FunctionInput(make_input((3, 2, 5))), + reference_fn=lambda m, p, i: torch.where(i >= 0, i, 2. * ((.5 * i).exp() - 1))), + ModuleInput(constructor_input=FunctionInput(alpha=2.), + forward_input=FunctionInput(make_input(())), + reference_fn=lambda m, p, i: torch.where(i >= 0, i, 2. 
* ((.5 * i).exp() - 1)), + desc='scalar'), + ModuleInput(constructor_input=FunctionInput(alpha=2.), + forward_input=FunctionInput(make_input((3,))), + desc='no_batch_dim', + reference_fn=no_batch_dim_reference_fn)] + + +def module_inputs_torch_nn_GLU(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((5, 6)))), + ModuleInput(constructor_input=FunctionInput(1), + forward_input=FunctionInput(make_input((5, 6, 7))), + desc='dim'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((4,))), + desc='no_batch_dim', + reference_fn=no_batch_dim_reference_fn)] + + +def module_inputs_torch_nn_GELU(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput('none'), + forward_input=FunctionInput(make_input(())), + reference_fn=lambda m, p, x, *_: x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))), + desc='scalar'), + ModuleInput(constructor_input=FunctionInput('none'), + forward_input=FunctionInput(make_input((3, 2, 5))), + reference_fn=lambda m, p, x, *_: x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((3,))), + desc='no_batch_dim', + reference_fn=no_batch_dim_reference_fn)] + + +def module_inputs_torch_nn_ReLU(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(())), + desc='scalar'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(4)), + 
reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((2, 3, 4, 5))), + desc='channels_last_mem_format'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((2, 3, 3, 4, 5))), + desc='channels_last_3d_mem_format')] + + +def module_inputs_torch_nn_ReLU6(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(())), + desc='scalar'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(4)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((2, 3, 4, 5))), + desc='channels_last_mem_format'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((2, 3, 3, 4, 5))), + desc='channels_last_3d_mem_format')] + + +def module_inputs_torch_nn_LeakyReLU(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((3, 2, 5)))), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(4)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim'), + ModuleInput(constructor_input=FunctionInput(0.5), + forward_input=FunctionInput(make_input((3, 2, 5))), + desc='with_negval'), + ModuleInput(constructor_input=FunctionInput(0.0), + forward_input=FunctionInput(make_input((10, 10))), + desc='with_zero_negval'), + ModuleInput(constructor_input=FunctionInput(0.5), + forward_input=FunctionInput(make_input(())), + desc='with_negval_scalar')] + + 
+def module_inputs_torch_nn_PReLU(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(())), + desc='scalar'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(4)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((2, 3, 4))), + reference_fn=lambda m, p, i: torch.clamp(i, min=0) + torch.clamp(i, max=0) * p[0][0], + desc='1d'), + ModuleInput(constructor_input=FunctionInput(3), + forward_input=FunctionInput(make_input((2, 3, 4))), + reference_fn=lambda m, p, i: torch.clamp(i, min=0) + torch.clamp(i, max=0) * p[0][0], + desc='1d_multiparam'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((2, 3, 4, 5))), + reference_fn=lambda m, p, i: torch.clamp(i, min=0) + torch.clamp(i, max=0) * p[0][0], + desc='2d'), + ModuleInput(constructor_input=FunctionInput(3), + forward_input=FunctionInput(make_input((2, 3, 4, 5))), + reference_fn=lambda m, p, i: torch.clamp(i, min=0) + torch.clamp(i, max=0) * p[0][0], + desc='2d_multiparam'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((2, 3, 4, 5, 6))), + reference_fn=lambda m, p, i: torch.clamp(i, min=0) + torch.clamp(i, max=0) * p[0][0], + desc='3d'), + ModuleInput(constructor_input=FunctionInput(3), + forward_input=FunctionInput(make_input((2, 3, 4, 5, 6))), + reference_fn=lambda m, p, i: torch.clamp(i, min=0) + torch.clamp(i, max=0) * p[0][0], + desc='3d_multiparam')] + + +def module_inputs_torch_nn_SELU(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + 
ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((3, 2, 5)))), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(4)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(())), + desc='scalar')] + + +def module_inputs_torch_nn_SiLU(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(())), + reference_fn=lambda m, p, x, *_: x * torch.sigmoid(x), + desc='scalar'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(4)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((5, 6, 7))), + reference_fn=lambda m, p, x, *_: x * torch.sigmoid(x))] + + +def module_inputs_torch_nn_Softmax(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(1), + forward_input=FunctionInput(make_input((10, 20))), + reference_fn=lambda m, p, i: torch.exp(i).div(torch.exp(i).sum(1, True).expand(10, 20))), + ModuleInput(constructor_input=FunctionInput(0), + forward_input=FunctionInput(make_input(())), + reference_fn=lambda m, p, i: torch.exp(i).div(torch.exp(i).sum(0, True)), + desc='scalar'), + ModuleInput(constructor_input=FunctionInput(-1), + forward_input=FunctionInput(make_input((4, 5))), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim')] + + +def module_inputs_torch_nn_Softmax2d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, 
dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((1, 3, 10, 20))), + reference_fn=lambda m, p, i: torch.exp(i).div(torch.exp(i).sum(1, False))), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((3, 4, 5))), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim')] + + +def module_inputs_torch_nn_LogSoftmax(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(1), + forward_input=FunctionInput(make_input((10, 20))), + reference_fn=lambda m, p, i: torch.exp(i).div_(torch.exp(i).sum(1, True).expand(10, 20)).log_()), + ModuleInput(constructor_input=FunctionInput(1), + forward_input=FunctionInput(make_input((1, 3, 10, 20))), + reference_fn=lambda m, p, i: torch.exp(i).div_(torch.exp(i).sum(1, False)).log_(), + desc='multiparam'), + ModuleInput(constructor_input=FunctionInput(0), + forward_input=FunctionInput(make_input(())), + reference_fn=lambda m, p, i: torch.exp(i).div_(torch.exp(i).sum(0, False)).log_(), + desc='multiparam_scalar'), + ModuleInput(constructor_input=FunctionInput(-1), + forward_input=FunctionInput(make_input((4, 5))), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim')] + + +def module_inputs_torch_nn_Softmin(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(1), + forward_input=FunctionInput(make_input((10, 20)))), + ModuleInput(constructor_input=FunctionInput(1), + forward_input=FunctionInput(make_input((2, 3, 5, 10))), + desc='multidim'), + ModuleInput(constructor_input=FunctionInput(0), + forward_input=FunctionInput(make_input(())), + desc='scalar'), + 
ModuleInput(constructor_input=FunctionInput(-1), + forward_input=FunctionInput(make_input((3, 4, 10))), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim')] + + +def module_inputs_torch_nn_Softplus(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((10, 20))), + reference_fn=lambda m, p, i: torch.log(1 + torch.exp(i))), + ModuleInput(constructor_input=FunctionInput(2), + forward_input=FunctionInput(make_input((10, 20))), + reference_fn=lambda m, p, i: 1. / 2. * torch.log(1 + torch.exp(2 * i)), + desc='beta'), + ModuleInput(constructor_input=FunctionInput(2, -100), + forward_input=FunctionInput(make_input((10, 20))), + reference_fn=( + lambda m, p, i: ((i * 2) > -100).type_as(i) * i + + ((i * 2) <= -100).type_as(i) * 1. / 2. * torch.log(1 + torch.exp(2 * i))), + desc='beta_threshold'), + ModuleInput(constructor_input=FunctionInput(2, -100), + forward_input=FunctionInput(make_input(())), + reference_fn=( + lambda m, p, i: ((i * 2) > -100).type_as(i) * i + + ((i * 2) <= -100).type_as(i) * 1. / 2. 
* torch.log(1 + torch.exp(2 * i))), + desc='beta_threshold_scalar'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(4)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim')] + + +def module_inputs_torch_nn_Softshrink(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((3, 2, 5)))), + ModuleInput(constructor_input=FunctionInput(1,), + forward_input=FunctionInput(make_input((3, 2, 5))), + desc='lambda'), + ModuleInput(constructor_input=FunctionInput(1,), + forward_input=FunctionInput(make_input(())), + desc='lambda_scalar'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(4)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim')] + + +def module_inputs_torch_nn_Softsign(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((3, 2, 5))), + reference_fn=lambda m, p, i: i.div(1 + torch.abs(i))), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(())), + reference_fn=lambda m, p, i: i.div(1 + torch.abs(i)), + desc='scalar'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(4)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim')] + + +def module_inputs_torch_nn_Tanh(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((2, 3, 4, 5)))), + 
ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(())), + desc='scalar'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(4)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim')] + + + +def module_inputs_torch_nn_Tanhshrink(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((2, 3, 4, 5)))), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(())), + desc='scalar'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(4)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim')] + + +def module_inputs_torch_nn_Threshold(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(2., 1.), + forward_input=FunctionInput(make_input((2, 3, 4, 5))), + desc='threshold_value'), + ModuleInput(constructor_input=FunctionInput(2., 10.), + forward_input=FunctionInput(make_input((2, 3, 4, 5))), + desc='large_value'), + ModuleInput(constructor_input=FunctionInput(2., 1.), + forward_input=FunctionInput(make_input(())), + desc='threshold_value_scalar'), + ModuleInput(constructor_input=FunctionInput(2., 1.), + forward_input=FunctionInput(make_input(4)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim')] + + +def module_inputs_torch_nn_Mish(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((5, 6, 7))), + reference_fn=lambda m, p, 
i: i * torch.tanh(F.softplus(i))), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(())), + reference_fn=lambda m, p, i: i * torch.tanh(F.softplus(i)), + desc='scalar'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(4)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim')] + + +def module_inputs_torch_nn_L1Loss(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((2, 3, 4)), + make_input((2, 3, 4))), + reference_fn=lambda m, p, i, t: 1. / i.numel() * sum((a - b).abs().sum() + for a, b in zip(i, t))), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(()), make_input(())), + reference_fn=lambda m, p, i, t: 1. / i.numel() * (i - t).abs().sum(), + desc='scalar')] + generate_regression_criterion_inputs(make_input) + + +def module_inputs_torch_nn_SmoothL1Loss(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + + cases: List[Tuple[str, dict]] = [ + ('', {}), + ('reduction_sum', {'reduction': 'sum'}), + ('reduction_mean', {'reduction': 'mean'}), + ('reduction_none', {'reduction': 'none'}), + ] + + module_inputs = [] + for desc, constructor_kwargs in cases: + def reference_fn(m, p, i, t, constructor_kwargs=constructor_kwargs): + return smoothl1loss_reference(i, t, **constructor_kwargs) + + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput(make_input((5, 10)), + make_input((5, 10))), + desc=desc, + reference_fn=reference_fn) + ) + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), + 
forward_input=FunctionInput(make_input(()), + make_input(())), + desc=f'scalar_{desc}', + reference_fn=reference_fn) + ) + + return module_inputs + + + +def module_inputs_torch_nn_BCELoss(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + make_target = partial(make_tensor, device=device, dtype=dtype, requires_grad=False) + make_weight = partial(make_tensor, device=device, dtype=dtype, requires_grad=False) + + cases: List[Tuple[str, dict]] = [ + ('', {}), + ('reduction_sum', {'reduction': 'sum'}), + ('reduction_mean', {'reduction': 'mean'}), + ('reduction_none', {'reduction': 'none'}), + ('weights', {'weight': make_weight((10,))}), + ] + + def bce_loss_reference_fn(m, p, i, t, reduction='mean', weight=None): + result = -(t * i.log() + (1 - t) * (1 - i).log()) + + if weight is not None: + result = result * weight + + if reduction == 'none': + return result + elif reduction == 'mean': + return result.sum() / i.numel() + else: + return result.sum() + + module_inputs = [] + for desc, constructor_kwargs in cases: + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput(make_input((15, 10), low=1e-2, high=1 - 1e-2), + make_target((15, 10)).gt(0).to(dtype)), + desc=desc, + reference_fn=partial(bce_loss_reference_fn, **constructor_kwargs)) + ) + + scalar_weight = make_weight(()) + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(weight=scalar_weight), + forward_input=FunctionInput(make_input((), low=1e-2, high=1 - 1e-2), + make_target(()).gt(0).to(dtype)), + desc='scalar_weight', + reference_fn=partial(bce_loss_reference_fn, weight=scalar_weight)) + ) + + return module_inputs + + +def module_inputs_torch_nn_BCEWithLogitsLoss(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + 
make_target = partial(make_tensor, device=device, dtype=dtype, requires_grad=False) + make_weight = partial(make_tensor, device=device, dtype=dtype, requires_grad=False) + + cases: List[Tuple[str, dict]] = [ + ('', {}), + ('reduction_sum', {'reduction': 'sum'}), + ('reduction_mean', {'reduction': 'mean'}), + ('reduction_none', {'reduction': 'none'}), + ('weights', {'weight': make_weight((10,))}), + ('scalar_weights', {'weight': make_weight(())}) + ] + + def bce_withlogitsloss_reference_fn(m, p, i, t, reduction='mean', weight=None): + # TODO: add pos_weight to the definition here and corresponding SampleInputs + max_val = (-i).clamp(min=0) + result = (1 - t).mul_(i).add_(max_val).add_((-max_val).exp_().add_((-i - max_val).exp_()).log_()) + + if weight is not None: + result = result * weight + + if reduction == 'none': + return result + elif reduction == 'mean': + return result.sum() / i.numel() + else: + return result.sum() + + module_inputs = [] + for desc, constructor_kwargs in cases: + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput(make_input((15, 10), low=1e-2, high=1 - 1e-2), + make_target((15, 10)).gt(0).to(dtype)), + desc=desc, + reference_fn=partial(bce_withlogitsloss_reference_fn, **constructor_kwargs)) + ) + + return module_inputs + + +def module_inputs_torch_nn_CrossEntropyLoss(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + make_target = partial(make_tensor, device=device, dtype=torch.long, requires_grad=False) + make_weight = partial(make_tensor, device=device, dtype=dtype, requires_grad=False) + + reductions: List[str] = ['mean', 'sum', 'none'] + cases: List[Tuple[str, dict]] = [ + ('', {}), + ('weights', {'weight': make_weight((3,))}), + ('ignore_index', {'ignore_index': 1}), + ('label_smoothing', {'label_smoothing': 0.15}), + ('ignore_index_label_smoothing', 
{'ignore_index': 1, 'label_smoothing': 0.15}) + ] + + module_inputs = [] + for reduction, (desc, constructor_kwargs) in product(reductions, cases): + def reference_fn(m, p, i, t, reduction=reduction, constructor_kwargs=constructor_kwargs): + return cross_entropy_loss_reference(i, t, reduction=reduction, **constructor_kwargs) + + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(reduction=reduction, **constructor_kwargs), + forward_input=FunctionInput(make_input((2, 3, 5, 5)), + make_target((2, 5, 5), low=0, high=3)), + desc=f"4d_{desc}_{reduction}", + reference_fn=reference_fn) + ) + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(reduction=reduction, **constructor_kwargs), + forward_input=FunctionInput(make_input((2, 3, 5)), + make_target((2, 5), low=0, high=3)), + desc=f"3d_{desc}_{reduction}", + reference_fn=reference_fn) + ) + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(reduction=reduction, **constructor_kwargs), + forward_input=FunctionInput(make_input((2, 3)), + make_target((2), low=0, high=3)), + desc=f"2d_{desc}_{reduction}", + reference_fn=reference_fn) + ) + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(reduction=reduction, **constructor_kwargs), + forward_input=FunctionInput(make_input((2, 3, 5, 5, 2, 2)), + make_target((2, 5, 5, 2, 2), low=0, high=3)), + desc=f"higher_dim_{desc}_{reduction}", + reference_fn=reference_fn) + ) + + if constructor_kwargs.get('ignore_index', None) is None: + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(reduction=reduction, **constructor_kwargs), + forward_input=FunctionInput(make_input((5, 3, 4, 2)), + make_input((5, 3, 4, 2)).softmax(dim=1)), + desc=f"4d_prob_target_{desc}_{reduction}", + reference_fn=reference_fn) + ) + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(reduction=reduction, **constructor_kwargs), + forward_input=FunctionInput(make_input((5, 3, 4)), + make_input((5, 3, 
4)).softmax(dim=1)), + desc=f"3d_prob_target_{desc}_{reduction}", + reference_fn=reference_fn) + ) + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(reduction=reduction, **constructor_kwargs), + forward_input=FunctionInput(make_input((5, 3)), + make_input((5, 3)).softmax(dim=1)), + desc=f"2d_prob_target_{desc}_{reduction}", + reference_fn=reference_fn) + ) + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(reduction=reduction, **constructor_kwargs), + forward_input=FunctionInput(make_input((2, 3, 5, 5, 2, 2)), + make_input((2, 3, 5, 5, 2, 2)).softmax(dim=1)), + desc=f"higher_dim_prob_target_{desc}_{reduction}", + reference_fn=reference_fn) + ) + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(reduction=reduction, **constructor_kwargs), + forward_input=FunctionInput(make_input((3,)), + make_target((), low=0, high=3)), + desc=f"no_batch_dim_{desc}_{reduction}", + reference_fn=partial(no_batch_dim_reference_fn, is_criterion=True)) + ) + + return module_inputs + + + +def module_inputs_torch_nn_CTCLoss(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + make_target = partial(make_tensor, device=device, requires_grad=False) + + cases: List[Tuple[str, dict]] = [ + ('', {}), + ('reduction_sum', {'reduction': 'sum'}), + ('reduction_mean', {'reduction': 'mean'}), + ('reduction_none', {'reduction': 'none'}), + ('blank', {'blank': 14}) + ] + target_dtypes = [torch.int, torch.long] + + module_inputs = [] + for target_dtype, (desc, constructor_kwargs) in product(target_dtypes, cases): + def reference_fn(m, p, i, t, il, tl, constructor_kwargs=constructor_kwargs): + return ctcloss_reference(i, t, il, tl, **constructor_kwargs) + + blank = constructor_kwargs.get('blank', 0) + low = 0 if blank == 14 else 1 + high = 14 if blank == 14 else 15 + + module_inputs.append( + ModuleInput( + 
constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput(make_input((50, 3, 15)).log_softmax(2), + make_target((3, 30), dtype=target_dtype, low=low, high=high), + (50, 50, 50), (30, 25, 20)), + desc=f'{desc}_lengths_intlists', + reference_fn=reference_fn) + ) + module_inputs.append( + ModuleInput( + constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput(make_input((50, 3, 15)).log_softmax(2), + make_target((3, 30), dtype=target_dtype, low=low, high=high), + torch.tensor((50, 50, 50), device=device), + torch.tensor((30, 25, 20), device=device)), + desc=f'{desc}_lengths_tensors', + reference_fn=reference_fn) + ) + module_inputs.append( + ModuleInput( + constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput(make_input((50, 3, 15)).log_softmax(2), + make_target((30 + 25 + 20,), dtype=target_dtype, low=low, high=high), + (50, 50, 50), (30, 25, 20)), + desc=f'{desc}_1d_target_lengths_intlists', + reference_fn=reference_fn) + ) + module_inputs.append( + ModuleInput( + constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput(make_input((50, 3, 15)).log_softmax(2), + make_target((30 + 25 + 20,), dtype=target_dtype, low=low, high=high), + torch.tensor((50, 50, 50), device=device), + torch.tensor((30, 25, 20), device=device)), + desc=f'{desc}_1d_target_lengths_tensors', + reference_fn=reference_fn) + ) + + return module_inputs + + +def module_inputs_torch_nn_GroupNorm(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(3, 6, 1e-3), + forward_input=FunctionInput(make_input((4, 6, 5))), + desc='1d_affine'), + ModuleInput( + constructor_input=FunctionInput(3, 12, 1e-3), + forward_input=FunctionInput(make_input((4, 12))), + desc='1d_affine_GN'), + ModuleInput( + constructor_input=FunctionInput(1, 6, 
1e-3), + forward_input=FunctionInput(make_input((150, 6))), + desc='1d_affine_large_batch'), + ModuleInput( + constructor_input=FunctionInput(5, 5, 1e-3, False), + forward_input=FunctionInput(make_input((4, 5, 5))), + desc='1d_no_affine_IN'), + ModuleInput( + constructor_input=FunctionInput(1, 10, 1e-3, False), + forward_input=FunctionInput(make_input((4, 10))), + desc='1d_no_affine_LN'), + ModuleInput( + constructor_input=FunctionInput(3, 6, 1e-3), + forward_input=FunctionInput(make_input((4, 6, 2, 3))), + desc='2d_affine'), + ModuleInput( + constructor_input=FunctionInput(3, 3, 1e-3, False), + forward_input=FunctionInput(make_input((4, 3, 2, 3))), + desc='2d_no_affine_IN'), + ModuleInput( + constructor_input=FunctionInput(1, 3, 1e-3, False), + forward_input=FunctionInput(make_input((4, 3, 2, 3))), + desc='2d_no_affine_LN'), + ] + + +def module_inputs_torch_nn_Hardshrink(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(2.), + forward_input=FunctionInput(make_input((4, 3, 2, 4))), + ), + ModuleInput( + constructor_input=FunctionInput(2.), + forward_input=FunctionInput(make_input(())), + desc='scalar', + ), + ModuleInput( + constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(4)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim', + ) + ] + + +def module_inputs_torch_nn_Hardswish(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(4)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim', + ), + ModuleInput( + constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((2, 3, 2, 5))), + desc='4d_input') + ] + + +def 
module_inputs_torch_nn_Hardtanh(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((3, 2, 5))), + reference_fn=lambda m, p, i: i.clamp(-1, 1), + ), + ModuleInput( + constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(())), + reference_fn=lambda m, p, i: i.clamp(-1, 1), + desc='scalar', + ), + ModuleInput( + constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(4)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim', + ) + ] + + +def module_inputs_torch_nn_HingeEmbeddingLoss(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + make_target = partial(make_tensor, device=device, dtype=dtype, requires_grad=False) + + cases: List[Tuple[str, dict]] = [ + ('', {}), + ('reduction_sum', {'reduction': 'sum'}), + ('reduction_mean', {'reduction': 'mean'}), + ('reduction_none', {'reduction': 'none'}), + ('margin', {'margin': 0.5}) + ] + + module_inputs = [] + for desc, constructor_kwargs in cases: + def reference_fn(m, p, i, t, constructor_kwargs=constructor_kwargs): + return hingeembeddingloss_reference(i, t, **constructor_kwargs) + + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput(make_input((10,)), + make_target((10,)).gt(0).to(dtype).mul_(2).sub_(1)), + desc=desc, + reference_fn=reference_fn) + ) + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput(make_input(()), + make_target(()).gt(0).to(dtype).mul_(2).sub_(1)), + desc=f'scalar_{desc}', + reference_fn=reference_fn) + ) + + return module_inputs + + +def module_inputs_torch_nn_HuberLoss(module_info, device, dtype, 
requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + cases: List[Tuple[str, dict]] = [ + ('', {}), + ('reduction_sum', {'reduction': 'sum'}), + ('reduction_mean', {'reduction': 'mean'}), + ('reduction_none', {'reduction': 'none'}), + ] + + module_inputs = [] + for desc, constructor_kwargs in cases: + def reference_fn(m, p, i, t, constructor_kwargs=constructor_kwargs): + return huberloss_reference(i, t, **constructor_kwargs) + + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput(make_input((5, 10)), + make_input((5, 10))), + desc=desc, + reference_fn=reference_fn) + ) + + return module_inputs + + +def module_inputs_torch_nn_InstanceNormNd(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + lazy = kwargs.get('lazy', False) + N = kwargs['N'] + num_features, eps, momentum, affine, track_running_stats = 3, 1e-3, 0.3, False, True + input_no_batch_shape_dict = {1: (3, 15), 2: (3, 6, 6), 3: (3, 4, 4, 4)} + input_no_batch_shape = input_no_batch_shape_dict[N] + input_batch_shape = (4,) + input_no_batch_shape + + return [ + ModuleInput( + constructor_input=( + FunctionInput(eps, momentum) if lazy else FunctionInput(num_features, eps, momentum) + ), + forward_input=FunctionInput(make_input(input_batch_shape))), + ModuleInput( + constructor_input=( + FunctionInput(eps, momentum, affine, track_running_stats) if lazy else + FunctionInput(num_features, eps, momentum, affine, track_running_stats) + ), + forward_input=FunctionInput(make_input(input_batch_shape)), + desc='tracking_stats'), + ModuleInput( + constructor_input=( + FunctionInput(eps, momentum) if lazy else FunctionInput(num_features, eps, momentum) + ), + forward_input=FunctionInput(make_input(input_no_batch_shape)), + reference_fn=no_batch_dim_reference_fn, + 
desc='tracking_stats_no_batch_dim'), + ModuleInput( + constructor_input=( + FunctionInput(eps, momentum, affine, track_running_stats) if lazy else + FunctionInput(num_features, eps, momentum, affine, track_running_stats) + ), + forward_input=FunctionInput(make_input(input_no_batch_shape)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim') + ] + +def module_inputs_torch_nn_LayerNorm(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput([5], 1e-3), + forward_input=FunctionInput(make_input((4, 5, 5))), + desc='1d_elementwise_affine'), + ModuleInput( + constructor_input=FunctionInput([5], 1e-3), + forward_input=FunctionInput(make_input((128, 5, 5))), + desc='1d_elementwise_affine_large_batch'), + ModuleInput( + constructor_input=FunctionInput([5], 1e-3, False), + forward_input=FunctionInput(make_input((4, 5, 5))), + desc='1d_no_elementwise_affine'), + ModuleInput( + constructor_input=FunctionInput([2, 2, 5], 1e-3), + forward_input=FunctionInput(make_input((4, 2, 2, 5))), + desc='3d_elementwise_affine'), + ModuleInput( + constructor_input=FunctionInput([2, 2, 5], 1e-3, False), + forward_input=FunctionInput(make_input((4, 2, 2, 5))), + desc='3d_no_elementwise_affine'), + ModuleInput( + constructor_input=FunctionInput([5], 1e-3), + forward_input=FunctionInput(make_input((0, 5))), + desc='1d_empty_elementwise_affine'), + ModuleInput( + constructor_input=FunctionInput([2, 2, 5], 1e-3, elementwise_affine=True, bias=False), + forward_input=FunctionInput(make_input((4, 2, 2, 5))), + desc='3d_elementwise_affine_no_bias'), + ] + +def module_inputs_torch_nn_RMSNorm(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + def rms_norm_reference_fn(m, p, i): + eps = m.eps + if eps is None: + eps = 
torch.finfo(i.dtype).eps + ndim = i.ndim + normalized_shape = m.normalized_shape + weight = m.weight + dims = [ndim - i - 1 for i in range(len(normalized_shape))] + result = i * torch.rsqrt(i.pow(2).mean(dim=dims, keepdim=True) + m.eps) + if weight is not None: + result *= weight + return result + + return [ + ModuleInput( + constructor_input=FunctionInput([5], 1e-3), + forward_input=FunctionInput(make_input((4, 5, 5))), + desc='1d_elementwise_affine', + reference_fn=rms_norm_reference_fn), + ModuleInput( + constructor_input=FunctionInput([5], 1e-3), + forward_input=FunctionInput(make_input((128, 5, 5))), + desc='1d_elementwise_affine_large_batch', + reference_fn=rms_norm_reference_fn), + ModuleInput( + constructor_input=FunctionInput([5], 1e-3, False), + forward_input=FunctionInput(make_input((4, 5, 5))), + desc='1d_no_elementwise_affine', + reference_fn=rms_norm_reference_fn), + ModuleInput( + constructor_input=FunctionInput([2, 2, 5], 1e-3), + forward_input=FunctionInput(make_input((4, 2, 2, 5))), + desc='3d_elementwise_affine', + reference_fn=rms_norm_reference_fn), + ModuleInput( + constructor_input=FunctionInput([2, 2, 5], 1e-3, False), + forward_input=FunctionInput(make_input((4, 2, 2, 5))), + desc='3d_no_elementwise_affine', + reference_fn=rms_norm_reference_fn), + ModuleInput( + constructor_input=FunctionInput([5], 1e-3), + forward_input=FunctionInput(make_input((0, 5))), + desc='1d_empty_elementwise_affine', + reference_fn=rms_norm_reference_fn), + ] + + +def module_inputs_torch_nn_LocalResponseNorm(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(3,), + forward_input=FunctionInput(make_input((1, 5, 7))), + desc='1d'), + ModuleInput( + constructor_input=FunctionInput(2,), + forward_input=FunctionInput(make_input((1, 5, 7, 7))), + desc='2d_uneven_pad'), + ModuleInput( + 
constructor_input=FunctionInput(1, 1., 0.5, 2.), + forward_input=FunctionInput(make_input((1, 5, 7, 7, 7))), + desc='3d_custom_params'), + ] + + +def module_inputs_torch_nn_LPPool1d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(1.5, 2), + forward_input=FunctionInput(make_input((1, 3, 7))), + desc='norm'), + ModuleInput( + constructor_input=FunctionInput(2, 2, 3), + forward_input=FunctionInput(make_input((1, 3, 7)))), + ModuleInput( + constructor_input=FunctionInput(2, 2, 3), + forward_input=FunctionInput(make_input((3, 7))), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim'), + ] + + + +def module_inputs_torch_nn_LPPool2d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(2, 2, 2), + forward_input=FunctionInput(make_input((1, 3, 7, 7)))), + ModuleInput( + constructor_input=FunctionInput(2, 2, 2), + forward_input=FunctionInput(make_input((3, 7, 7))), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim'), + ModuleInput( + constructor_input=FunctionInput(1.5, 2), + forward_input=FunctionInput(make_input((1, 3, 7, 7))), + desc='norm'), + ] + + +def module_inputs_torch_nn_LPPool3d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(2, 2, 2), + forward_input=FunctionInput(make_input((1, 3, 7, 7, 7)))), + ModuleInput( + constructor_input=FunctionInput(2, 2, 2), + forward_input=FunctionInput(make_input((3, 7, 7, 7))), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim'), + ModuleInput( + constructor_input=FunctionInput(1.5, 2), + 
forward_input=FunctionInput(make_input((1, 3, 7, 7, 7))), + desc='norm'), + ] + + +def module_inputs_torch_nn_MaxPool1d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(4), + forward_input=FunctionInput(make_input((2, 10, 4))), + desc='3d_input'), + ModuleInput( + constructor_input=FunctionInput(4, 4), + forward_input=FunctionInput(make_input((2, 10, 4))), + desc='stride'), + ModuleInput( + constructor_input=FunctionInput(4, return_indices=True), + forward_input=FunctionInput(make_input((2, 10, 4))), + desc='return_indices'), + ] + + +def module_inputs_torch_nn_MaxPool2d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput((3, 3), (2, 2), (1, 1)), + forward_input=FunctionInput(make_input((3, 7, 7))), + desc='3d_input'), + ModuleInput( + constructor_input=FunctionInput((3, 3), (2, 2), (1, 1)), + forward_input=FunctionInput(make_input((1, 3, 7, 7))), + desc='4d_input'), + ModuleInput( + constructor_input=FunctionInput((3, 3), (2, 2), (1, 1), return_indices=True), + forward_input=FunctionInput(make_input((1, 3, 7, 7))), + desc='return_indices'), + ] + +def module_inputs_torch_nn_MaxPool3d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput((2, 2, 2)), + forward_input=FunctionInput(make_input((2, 3, 5, 5, 5)))), + ModuleInput( + constructor_input=FunctionInput(2, (2, 2, 2)), + forward_input=FunctionInput(make_input((2, 3, 5, 5, 5))), + desc='stride'), + ModuleInput( + constructor_input=FunctionInput(2, 2, (1, 1, 1)), + forward_input=FunctionInput(make_input((2, 3, 5, 5, 5))), 
+ desc='stride_padding'), + ModuleInput( + constructor_input=FunctionInput(2, 2, (1, 1, 1), return_indices=True), + forward_input=FunctionInput(make_input((2, 3, 5, 5, 5))), + desc='return_indices'), + ] + + +def module_inputs_torch_nn_FractionalMaxPool2d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + def make_random_samples(): + return torch.empty((1, 3, 2), dtype=torch.double, device=device).uniform_() + + return [ + ModuleInput( + constructor_input=FunctionInput(2, output_ratio=0.5, _random_samples=make_random_samples()), + forward_input=FunctionInput(make_input((1, 3, 5, 7))), + desc='ratio'), + ModuleInput( + constructor_input=FunctionInput((2, 3), output_size=(4, 3), _random_samples=make_random_samples()), + forward_input=FunctionInput(make_input((1, 3, 7, 6))), + desc='size'), + ModuleInput( + constructor_input=FunctionInput( + 2, output_ratio=0.5, _random_samples=make_random_samples(), return_indices=True + ), + forward_input=FunctionInput(make_input((1, 3, 5, 7))), + desc='ratio_return_indices'), + ModuleInput( + constructor_input=FunctionInput(2, output_ratio=0.5, _random_samples=make_random_samples()), + forward_input=FunctionInput(make_input((3, 5, 7))), + reference_fn=no_batch_dim_reference_fn, + desc='ratio_no_batch_dim'), + ModuleInput( + constructor_input=FunctionInput((2, 3), output_size=(4, 3), _random_samples=make_random_samples()), + forward_input=FunctionInput(make_input((3, 7, 6))), + reference_fn=no_batch_dim_reference_fn, + desc='size_no_batch_dim'), + ] + + +def module_inputs_torch_nn_FractionalMaxPool3d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + def make_random_samples(): + return torch.empty((2, 4, 3), dtype=torch.double, device=device).uniform_() + + return [ + ModuleInput( + constructor_input=FunctionInput(2, 
output_ratio=0.5, _random_samples=make_random_samples()), + forward_input=FunctionInput(make_input((2, 4, 5, 5, 5))), + desc='ratio'), + ModuleInput( + constructor_input=FunctionInput((2, 2, 2), output_size=(4, 4, 4), _random_samples=make_random_samples()), + forward_input=FunctionInput(make_input((2, 4, 7, 7, 7))), + desc='size'), + ModuleInput( + constructor_input=FunctionInput((4, 2, 3), output_size=(10, 3, 2), _random_samples=make_random_samples()), + forward_input=FunctionInput(make_input((2, 4, 16, 7, 5))), + desc='asymsize'), + ModuleInput( + constructor_input=FunctionInput( + 2, output_ratio=0.5, _random_samples=make_random_samples(), return_indices=True + ), + forward_input=FunctionInput(make_input((2, 4, 5, 5, 5))), + desc='ratio_return_indices'), + ModuleInput( + constructor_input=FunctionInput(2, output_ratio=0.5, _random_samples=make_random_samples()), + forward_input=FunctionInput(make_input((4, 5, 5, 5))), + reference_fn=no_batch_dim_reference_fn, + desc='ratio_no_batch_dim'), + ModuleInput( + constructor_input=FunctionInput((2, 2, 2), output_size=(4, 4, 4), _random_samples=make_random_samples()), + forward_input=FunctionInput(make_input((4, 7, 7, 7))), + reference_fn=no_batch_dim_reference_fn, + desc='size_no_batch_dim'), + ] + + +def module_inputs_torch_nn_Sigmoid(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(())), + desc='scalar' + ), + ModuleInput( + constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(4)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim', + ), + ModuleInput( + constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((2, 3, 4, 5))), + desc='channels_last_mem_format' + ), + ModuleInput( + constructor_input=FunctionInput(), + 
forward_input=FunctionInput(make_input((2, 3, 3, 4, 5))), + desc='channels_last_3d_mem_format' + ) + ] + + +def module_inputs_torch_nn_LogSigmoid(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(())), + reference_fn=lambda m, p, i: i.sigmoid().log(), + desc='scalar' + ), + ModuleInput( + constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((2, 3, 4))), + reference_fn=lambda m, p, i: i.sigmoid().log(), + ), + ModuleInput( + constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(4)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim', + ), + ] + + +def module_inputs_torch_nn_MarginRankingLoss(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + make_target = partial(make_tensor, device=device, dtype=torch.long, requires_grad=False) + + cases: List[Tuple[str, dict]] = [ + ('', {}), + ('reduction_sum', {'reduction': 'sum'}), + ('reduction_mean', {'reduction': 'mean'}), + ('reduction_none', {'reduction': 'none'}), + ('margin', {'margin': 0.5}) + ] + + module_inputs = [] + for desc, constructor_kwargs in cases: + def reference_fn(m, p, i1, i2, t, constructor_kwargs=constructor_kwargs): + return marginrankingloss_reference(i1, i2, t, **constructor_kwargs) + + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput(make_input((50,)), make_input((50,)), + make_target((50,)).sign()), + desc=desc, + reference_fn=reference_fn) + ) + + return module_inputs + + +def module_inputs_torch_nn_MultiLabelMarginLoss(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, 
requires_grad=requires_grad) + make_target = partial(make_tensor, device=device, dtype=torch.long, requires_grad=False) + + cases: List[Tuple[str, dict]] = [ + ('', {}), + ('reduction_sum', {'reduction': 'sum'}), + ('reduction_mean', {'reduction': 'mean'}), + ('reduction_none', {'reduction': 'none'}), + ] + + module_inputs = [] + for desc, constructor_kwargs in cases: + def reference_fn(m, p, i, t, constructor_kwargs=constructor_kwargs): + return multilabelmarginloss_reference(i, t, **constructor_kwargs) + + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput(make_input((10,)), + make_target((10), low=0, high=10)), + desc=f'1d_{desc}', + reference_fn=reference_fn) + ) + + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput(make_input((5, 10)), + make_target((5, 10), low=0, high=10)), + desc=desc, + reference_fn=reference_fn) + ) + + return module_inputs + + +def module_inputs_torch_nn_MultiMarginLoss(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + make_target = partial(make_tensor, device=device, dtype=torch.long, requires_grad=False) + make_weight = partial(make_tensor, device=device, dtype=dtype, requires_grad=False) + + cases: List[Tuple[str, dict]] = [ + ('', {}), + ('reduction_sum', {'reduction': 'sum'}), + ('reduction_mean', {'reduction': 'mean'}), + ('reduction_none', {'reduction': 'none'}), + ('p', {'p': 2}), + ('margin', {'margin': 0.5}), + ('weights', {'weight': make_weight(10)}) + ] + + module_inputs = [] + for desc, constructor_kwargs in cases: + def reference_fn(m, p, i, t, constructor_kwargs=constructor_kwargs): + return multimarginloss_reference(i, t, **constructor_kwargs) + + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), + 
forward_input=FunctionInput(make_input((5, 10)), + make_target((5), low=0, high=10)), + desc=desc, + reference_fn=reference_fn) + ) + + return module_inputs + + +def module_inputs_torch_nn_MultiLabelSoftMarginLoss(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + make_target = partial(make_tensor, device=device, dtype=torch.long, requires_grad=False) + make_weight = partial(make_tensor, device=device, dtype=dtype, requires_grad=False) + + cases: List[Tuple[str, dict]] = [ + ('', {}), + ('reduction_sum', {'reduction': 'sum'}), + ('reduction_mean', {'reduction': 'mean'}), + ('reduction_none', {'reduction': 'none'}), + ('weight', {'weight': make_weight(10)}), + ] + + def multilabelsoftmargin_loss_reference_fn(m, p, i, t, reduction='mean', weight=None): + result = t * i.sigmoid().log() + (1 - t) * (-i).sigmoid().log() + if weight is not None: + result *= weight + result = (-result).sum(i.dim() - 1) / i.size(-1) + + if reduction == 'none': + return result + elif reduction == 'mean': + return result.mean() + else: + return result.sum() + + module_inputs = [] + for desc, constructor_kwargs in cases: + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput(make_input((5, 10)), + make_target((5, 10), low=0, high=2)), + desc=desc, + reference_fn=partial(multilabelsoftmargin_loss_reference_fn, **constructor_kwargs)) + ) + + return module_inputs + + +def module_inputs_torch_nn_SoftMarginLoss(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + make_target = partial(make_tensor, device=device, dtype=dtype, requires_grad=False) + + cases: List[Tuple[str, dict]] = [ + ('', {}), + ('reduction_sum', {'reduction': 'sum'}), + ('reduction_mean', {'reduction': 'mean'}), + ('reduction_none', {'reduction': 
'none'}), + ] + + module_inputs = [] + for desc, constructor_kwargs in cases: + def reference_fn(m, p, i, t, constructor_kwargs=constructor_kwargs): + return softmarginloss_reference(i, t, **constructor_kwargs) + + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput(make_input((5, 5)), + make_target((5, 5)).sign()), + desc=desc, + reference_fn=reference_fn) + ) + + return module_inputs + + +def module_inputs_torch_nn_TransformerEncoder(module_info, device, dtype, requires_grad, training, **kwargs): + # Reuse the TransformerEncoderLayer samples since the forward args are nearly the same. + samples = [] + for layer_module_input in module_inputs_torch_nn_TransformerEncoderLayer( + None, device, dtype, requires_grad, training): + # Construct a TransformerEncoderLayer object to pass to TransformerEncoder. + l_args, l_kwargs = (layer_module_input.constructor_input.args, + layer_module_input.constructor_input.kwargs) + l_kwargs['device'] = device + l_kwargs['dtype'] = dtype + encoder_layer = torch.nn.TransformerEncoderLayer(*l_args, **l_kwargs) + num_layers = 2 + # Note: TransformerEncoderLayer takes a "src_mask" while + # TransformerEncoder takes a "mask"; rename kwarg appropriately. 
+ forward_input = layer_module_input.forward_input + if 'src_mask' in forward_input.kwargs: + forward_input.kwargs['mask'] = forward_input.kwargs['src_mask'] + del forward_input.kwargs['src_mask'] + samples.append(ModuleInput( + constructor_input=FunctionInput(encoder_layer, num_layers), + forward_input=forward_input, + desc=layer_module_input.desc + )) + return samples + +def module_inputs_torch_nn_TransformerEncoderLayer(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + samples = [ + ModuleInput( + constructor_input=FunctionInput(4, 2, 16, 0.0), + forward_input=FunctionInput( + make_input((2, 3, 4)) + ), + desc='relu_activation' + ), + ModuleInput( + constructor_input=FunctionInput(4, 2, 8, 0.0, F.gelu), + forward_input=FunctionInput( + make_input((2, 3, 4)) + ), + desc='gelu_activation' + ), + ModuleInput( + constructor_input=FunctionInput(4, 2, 8, 0.0, bias=False), + forward_input=FunctionInput( + make_input((2, 3, 4)) + ), + desc='no_bias' + ), ] + + # Samples below are for validating the no-batch-dim support. 
+ key_padding_masks = (None, torch.tensor([False, False, True], device=device, dtype=torch.bool)) + attn_masks = (None, torch.tensor([False, False, True], device=device, dtype=torch.bool).expand((3, 3))) + for src_mask, src_key_padding_mask, norm_first, batch_first, bias in \ + itertools.product(attn_masks, key_padding_masks, (True, False), (True, False), (True, False)): + samples.append( + ModuleInput( + constructor_input=FunctionInput(d_model=4, nhead=2, dim_feedforward=8, + dropout=0.0, batch_first=batch_first, + norm_first=norm_first, bias=bias), + forward_input=FunctionInput( + make_input((3, 4)), src_mask=src_mask, src_key_padding_mask=src_key_padding_mask + ), + reference_fn=partial(no_batch_dim_reference_fn, + batch_first=batch_first, kwargs_to_batchify={'src_key_padding_mask': 0}), + desc=f'no_batch_dim_batch_first_{batch_first}' + )) + + # Samples below where we pass reference_fn are for validating the fast path, + # since the fast path requires no_grad mode, we run the fast path in .eval() + # and no_grad() in the reference_fn and verify that against the results in train mode. 
+ def fast_path_reference_fn(module, parameters, *args, **kwargs): + assert module.training + module.train(False) + with torch.no_grad(): + output = module(*args, **kwargs) + module.train(True) + return output + + if training: + for norm_first, bias in itertools.product((True, False), (True, False)): + samples.append( + ModuleInput( + constructor_input=FunctionInput( + 4, 2, 8, dropout=0.0, batch_first=True, norm_first=norm_first, bias=bias + ), + forward_input=FunctionInput( + make_input((2, 3, 4)), + ), + # fastpath doesn't run when bias=False + reference_fn=fast_path_reference_fn if bias else None, + desc=f'fastpath_{bias}_norm_first_{norm_first}' + ) + ) + + return samples + + +def module_inputs_torch_nn_TransformerDecoderLayer(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + samples = [ + ModuleInput( + constructor_input=FunctionInput(4, 2, 16, 0.0), + forward_input=FunctionInput( + make_input((2, 3, 4)), make_input((2, 3, 4)) + ), + desc='relu_activation' + ), + ModuleInput( + constructor_input=FunctionInput(4, 2, 8, 0.0, F.gelu), + forward_input=FunctionInput( + make_input((2, 3, 4)), make_input((2, 3, 4)) + ), + desc='gelu_activation' + ), + ModuleInput( + constructor_input=FunctionInput(4, 2, 8, 0.0, bias=False), + forward_input=FunctionInput( + make_input((2, 3, 4)), make_input((2, 3, 4)) + ), + desc='no_bias' + ), ] + + key_padding_masks = (None, torch.tensor([False, False, True], device=device, dtype=torch.bool)) + attn_masks = (None, torch.tensor([False, False, True], device=device, dtype=torch.bool).expand((3, 3))) + for tgt_mask, tgt_key_padding_mask, norm_first, bias, batch_first in \ + itertools.product(attn_masks, key_padding_masks, (True, False), (True, False), (True, False)): + # Using same mask for tgt and memory + memory_mask = tgt_mask + memory_key_padding_mask = tgt_key_padding_mask + samples.append( + ModuleInput( + 
constructor_input=FunctionInput(d_model=4, nhead=2, dim_feedforward=8, + dropout=0.0, batch_first=batch_first, + norm_first=norm_first, bias=bias), + forward_input=FunctionInput( + make_input((3, 4)), make_input((3, 4)), tgt_mask=tgt_mask, memory_mask=memory_mask, + tgt_key_padding_mask=tgt_key_padding_mask, memory_key_padding_mask=memory_key_padding_mask + ), + reference_fn=partial(no_batch_dim_reference_fn, + batch_first=batch_first, + kwargs_to_batchify={'tgt_key_padding_mask': 0, 'memory_key_padding_mask': 0}), + desc=f'no_batch_dim_batch_first_{batch_first}' + )) + src, tgt = make_input((2, 3, 4)), make_input((2, 3, 4)) + if not batch_first: + src, tgt = src.transpose(0, 1), tgt.transpose(0, 1) + if tgt_key_padding_mask is not None: + memory_key_padding_mask, tgt_key_padding_mask = (tgt_key_padding_mask.expand(2, 3),) * 2 + samples.append( + ModuleInput( + constructor_input=FunctionInput(d_model=4, nhead=2, dim_feedforward=8, + dropout=0.0, batch_first=batch_first, + norm_first=norm_first, bias=bias), + forward_input=FunctionInput( + src, tgt, tgt_mask=tgt_mask, memory_mask=memory_mask, + tgt_key_padding_mask=tgt_key_padding_mask, memory_key_padding_mask=memory_key_padding_mask + ), + desc=f'norm_first_{norm_first}_batch_first_{batch_first}_bias_{bias}' + )) + + return samples + + +def module_inputs_torch_nn_Transformer(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + samples = [] + # Samples below are for validating the no-batch-dim support. 
+ key_padding_masks = (None, torch.tensor([False, False, True], device=device, dtype=torch.bool)) + attn_masks = (None, torch.tensor([False, False, True], device=device, dtype=torch.bool).expand((3, 3))) + for mask, key_padding_mask, norm_first, bias, batch_first in \ + itertools.product(attn_masks, key_padding_masks, (True, False), (True, False), (True, False)): + # Using same mask for tgt and memory + src_mask , tgt_mask = (mask,) * 2 + src_key_padding_mask, tgt_key_padding_mask = (key_padding_mask,) * 2 + samples.append( + ModuleInput( + constructor_input=FunctionInput(d_model=4, nhead=2, dim_feedforward=8, + num_encoder_layers=1, num_decoder_layers=1, + dropout=0.0, batch_first=batch_first, norm_first=norm_first, bias=bias), + forward_input=FunctionInput( + make_input((3, 4)), make_input((3, 4)), tgt_mask=tgt_mask, src_mask=src_mask, + tgt_key_padding_mask=tgt_key_padding_mask, src_key_padding_mask=src_key_padding_mask + ), + reference_fn=partial(no_batch_dim_reference_fn, + batch_first=batch_first, + kwargs_to_batchify={'tgt_key_padding_mask': 0, 'src_key_padding_mask': 0}), + desc=f'no_batch_dim_batch_first_{batch_first}' + )) + + src, tgt = make_input((2, 3, 4)), make_input((2, 3, 4)) + if not batch_first: + src = src.transpose(0, 1) + tgt = tgt.transpose(0, 1) + if key_padding_mask is not None: + src_key_padding_mask, tgt_key_padding_mask = (key_padding_mask.expand(2, 3),) * 2 + + samples.append( + ModuleInput( + constructor_input=FunctionInput(d_model=4, nhead=2, dim_feedforward=8, + num_encoder_layers=1, num_decoder_layers=1, + dropout=0.0, batch_first=batch_first, norm_first=norm_first, bias=bias), + forward_input=FunctionInput( + src, tgt, tgt_mask=tgt_mask, src_mask=src_mask, + tgt_key_padding_mask=tgt_key_padding_mask, src_key_padding_mask=src_key_padding_mask + ), + )) + return samples + + +def module_inputs_torch_nn_Embedding(module_info, device, dtype, requires_grad, training, **kwargs): + make_empty = partial(torch.empty, device=device, 
dtype=torch.long, requires_grad=False) + return [ + ModuleInput( + constructor_input=FunctionInput(num_embeddings=4, embedding_dim=3), + forward_input=FunctionInput(make_empty(2, 3).random_(4)) + ), + ModuleInput( + constructor_input=FunctionInput(num_embeddings=4, embedding_dim=3), + forward_input=FunctionInput(make_empty(1, 512).random_(4).expand(7, 512)), + desc='discontiguous' + ), + ] + + +def module_inputs_torch_nn_MultiheadAttention(module_info, device, dtype, requires_grad, training, **kwargs): + # Currently all samples below are for validating the no-batch-dim support. + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + samples = [] + bool_vals = (True, False) + key_padding_masks = (None, torch.tensor([False, False, True], device=device, dtype=torch.bool)) + attn_masks = (None, torch.tensor([False, False, True], device=device, dtype=torch.bool).expand((3, 3, 3))) + products = itertools.product(bool_vals, bool_vals, bool_vals, key_padding_masks, attn_masks) + for bias, add_bias_kv, add_zero_attn, key_padding_mask, attn_mask in products: + samples.append( + ModuleInput( + constructor_input=FunctionInput(embed_dim=3, num_heads=3, batch_first=True, + bias=bias, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn), + forward_input=FunctionInput(make_input((3, 3)), make_input((3, 3)), make_input((3, 3)), + key_padding_mask=key_padding_mask, attn_mask=attn_mask), + reference_fn=no_batch_dim_reference_mha, + ) + ) + samples.append( + ModuleInput( + constructor_input=FunctionInput(embed_dim=3, num_heads=3, batch_first=False, + bias=bias, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn), + forward_input=FunctionInput(make_input((3, 3)), make_input((3, 3)), make_input((3, 3)), + key_padding_mask=key_padding_mask, attn_mask=attn_mask), + reference_fn=partial(no_batch_dim_reference_mha, batch_first=False), + ) + ) + + return samples + + +def module_inputs_torch_nn_RNN_GRU_Cell(module_info, device, dtype, requires_grad, 
training, **kwargs): + # Currently all samples below are for validating the no-batch-dim support. + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + samples = [ + ModuleInput( + constructor_input=FunctionInput(5, 10), + forward_input=FunctionInput(make_input(5), make_input(10)), + reference_fn=no_batch_dim_reference_fn, + ), + ModuleInput( + constructor_input=FunctionInput(5, 10, bias=True), + forward_input=FunctionInput(make_input(5), make_input(10)), + reference_fn=no_batch_dim_reference_fn, + ) + ] + + is_rnn = kwargs.get('is_rnn', False) + if is_rnn: + # RNN also supports `nonlinearity` argument. + # `tanh` is the default, so we check with `relu` + samples.append( + ModuleInput( + constructor_input=FunctionInput(5, 10, bias=True, nonlinearity='relu'), + forward_input=FunctionInput(make_input(5), make_input(10)), + reference_fn=no_batch_dim_reference_fn, + ) + ) + + return samples + + +def module_inputs_torch_nn_LSTMCell(module_info, device, dtype, requires_grad, training, **kwargs): + # Currently all samples below are for validating the no-batch-dim support. 
+ make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + samples = ( + ModuleInput( + constructor_input=FunctionInput(5, 10), + forward_input=FunctionInput(make_input(5), (make_input(10), make_input(10))), + reference_fn=no_batch_dim_reference_lstmcell, + ), + ModuleInput( + constructor_input=FunctionInput(5, 10, bias=True), + forward_input=FunctionInput(make_input(5), (make_input(10), make_input(10))), + reference_fn=no_batch_dim_reference_lstmcell, + ), + ) + + return samples + +def make_packed_sequence(inp, batch_sizes): + required_grad = inp.requires_grad + inp.requires_grad_(False) # user won't have access to inp so won't be able to get its grads + seq = pack_padded_sequence(inp, batch_sizes) + seq.data.requires_grad_(required_grad) + return seq + + +def module_inputs_torch_nn_RNN_GRU(module_info, device, dtype, requires_grad, training, with_packed_sequence=False, **kwargs): + # Currently all samples below are for validating the no-batch-dim support. 
+ make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + is_rnn = kwargs['is_rnn'] + nonlinearity = ('relu', 'tanh') + bias = (False, True) + batch_first = (False, True) + bidirectional = (False, True) + + samples = [] + if is_rnn: + prod_gen = product(nonlinearity, bias, batch_first, bidirectional) + else: + prod_gen = product(bias, batch_first, bidirectional) + + for args in prod_gen: + if is_rnn: + nl, b, b_f, bidir = args + else: + b, b_f, bidir = args + + cons_args = {'input_size': 2, 'hidden_size': 2, 'num_layers': 2, + 'batch_first': b_f, 'bias': b, 'bidirectional': bidir} + cons_args_hidden = {'input_size': 2, 'hidden_size': 3, 'num_layers': 2, + 'batch_first': b_f, 'bias': b, 'bidirectional': bidir} + + if is_rnn: + cons_args['nonlinearity'] = nl + cons_args_hidden['nonlinearity'] = nl + samples.append( + ModuleInput( + constructor_input=FunctionInput(**cons_args), + forward_input=FunctionInput(make_input((3, 2))), + reference_fn=partial(no_batch_dim_reference_rnn_gru, batch_first=b_f), + ) + ) + samples.append( + ModuleInput( + constructor_input=FunctionInput(**cons_args_hidden), + forward_input=FunctionInput(make_input((3, 2)), make_input((4 if bidir else 2, 3))), + reference_fn=partial(no_batch_dim_reference_rnn_gru, batch_first=b_f), + ) + ) + if with_packed_sequence: + samples.append( + ModuleInput( + constructor_input=FunctionInput(**cons_args), + forward_input=FunctionInput(make_packed_sequence(make_input((5, 2, 2)), torch.tensor([5, 3]))), + reference_fn=partial(no_batch_dim_reference_rnn_gru, batch_first=b_f), + ) + ) + samples.append( + ModuleInput( + constructor_input=FunctionInput(**cons_args), + forward_input=FunctionInput(make_packed_sequence(make_input((5, 5, 2)), torch.tensor([5, 3, 3, 2, 2]))), + reference_fn=partial(no_batch_dim_reference_rnn_gru, batch_first=b_f), + ) + ) + + return samples + + +def module_inputs_torch_nn_LSTM(module_info, device, dtype, requires_grad, training, **kwargs): + # 
Currently all samples below are for validating the no-batch-dim support. + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + bias = (False, True) + batch_first = (False, True) + bidirectional = (False, True) + proj_sizes = (0, 2) + + samples = [] + prod_gen = product(bias, batch_first, bidirectional, proj_sizes) + + for args in prod_gen: + b, b_f, bidir, proj_size = args + hidden_size = 3 + cons_args = {'input_size': 2, 'hidden_size': hidden_size, 'num_layers': 2, 'proj_size': proj_size, + 'batch_first': b_f, 'bias': b, 'bidirectional': bidir} + cons_args_hidden = {'input_size': 2, 'hidden_size': hidden_size, 'num_layers': 2, 'proj_size': proj_size, + 'batch_first': b_f, 'bias': b, 'bidirectional': bidir} + + samples.append( + ModuleInput( + constructor_input=FunctionInput(**cons_args), + forward_input=FunctionInput(make_input((2, 2))), + reference_fn=partial(no_batch_dim_reference_lstm, batch_first=b_f), + ) + ) + + h_out = proj_size if proj_size > 0 else hidden_size + hx = (make_input((4 if bidir else 2, h_out)), make_input((4 if bidir else 2, hidden_size))) + samples.append( + ModuleInput( + constructor_input=FunctionInput(**cons_args_hidden), + forward_input=FunctionInput(make_input((3, 2)), hx), + reference_fn=partial(no_batch_dim_reference_lstm, batch_first=b_f), + ) + ) + + + return samples + + + +def module_inputs_torch_nn_ReflectionPad1d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(1), + forward_input=FunctionInput(make_input((2, 3))), + reference_fn=no_batch_dim_reference_fn, + ), + ModuleInput( + constructor_input=FunctionInput((1, 2)), + forward_input=FunctionInput(make_input((2, 3, 4))), + ), + ] + +def module_inputs_torch_nn_ReflectionPad2d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, 
device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(1), + forward_input=FunctionInput(make_input((3, 4, 5))), + reference_fn=no_batch_dim_reference_fn, + ), + ModuleInput( + constructor_input=FunctionInput((1, 2, 3, 4)), + forward_input=FunctionInput(make_input((3, 4, 5, 6))), + ), + ] + +def module_inputs_torch_nn_ReflectionPad3d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(1), + forward_input=FunctionInput(make_input((2, 3, 4, 5))), + reference_fn=no_batch_dim_reference_fn + ), + ModuleInput( + constructor_input=FunctionInput((1, 2, 1, 2, 1, 2)), + forward_input=FunctionInput(make_input((3, 3, 3, 3, 3))), + ), + ] + +def module_inputs_torch_nn_ReplicationPad1d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(1), + forward_input=FunctionInput(make_input((3, 4))), + reference_fn=no_batch_dim_reference_fn + ), + ModuleInput( + constructor_input=FunctionInput((1, 2)), + forward_input=FunctionInput(make_input((3, 4, 5))), + ), + ] + +def module_inputs_torch_nn_ReplicationPad2d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(1), + forward_input=FunctionInput(make_input((3, 4, 5))), + reference_fn=no_batch_dim_reference_fn, + ), + ModuleInput( + constructor_input=FunctionInput((1, 2, 3, 4)), + forward_input=FunctionInput(make_input((3, 4, 5, 6))), + ), + ] + +def module_inputs_torch_nn_ReplicationPad3d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = 
partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(1), + forward_input=FunctionInput(make_input((3, 4, 5, 6))), + reference_fn=no_batch_dim_reference_fn, + ), + ModuleInput( + constructor_input=FunctionInput((1, 2, 3, 4, 5, 6)), + forward_input=FunctionInput(make_input((3, 4, 5, 6, 7))), + ), + ] + +def module_inputs_torch_nn_ZeroPad1d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(1), + forward_input=FunctionInput(make_input((3, 4))), + reference_fn=no_batch_dim_reference_fn, + ), + ModuleInput( + constructor_input=FunctionInput((1, 2)), + forward_input=FunctionInput(make_input((3, 4, 5))), + ), + ] + +def module_inputs_torch_nn_ZeroPad2d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(1), + forward_input=FunctionInput(make_input((1, 2, 3))), + reference_fn=no_batch_dim_reference_fn + ), + ModuleInput( + constructor_input=FunctionInput((1, 2, 3, 4)), + forward_input=FunctionInput(make_input((1, 2, 3, 4))), + ), + ] + +def module_inputs_torch_nn_ZeroPad3d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(1), + forward_input=FunctionInput(make_input((3, 4, 5, 6))), + reference_fn=no_batch_dim_reference_fn, + ), + ModuleInput( + constructor_input=FunctionInput((1, 2, 3, 4, 5, 6)), + forward_input=FunctionInput(make_input((1, 2, 3, 4, 5))), + ), + ] + +def module_inputs_torch_nn_ConstantPad1d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = 
partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(1, 2), + forward_input=FunctionInput(make_input((3, 4))), + reference_fn=no_batch_dim_reference_fn, + ), + ModuleInput( + constructor_input=FunctionInput((1, 2), 3), + forward_input=FunctionInput(make_input((3, 4, 5))), + ), + ] + +def module_inputs_torch_nn_ConstantPad2d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(1, 3), + forward_input=FunctionInput(make_input((3, 4, 5))), + reference_fn=no_batch_dim_reference_fn + ), + ModuleInput( + constructor_input=FunctionInput((1, 2, 3, 4), 5), + forward_input=FunctionInput(make_input((1, 2, 3, 4))), + ), + ] + +def module_inputs_torch_nn_ConstantPad3d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(1, 3), + forward_input=FunctionInput(make_input((3, 4, 5, 6))), + reference_fn=no_batch_dim_reference_fn, + ), + ModuleInput( + constructor_input=FunctionInput((1, 2, 3, 4, 5, 6), 7), + forward_input=FunctionInput(make_input((1, 2, 1, 2, 1))), + ), + ] + +def module_inputs_torch_nn_CircularPad1d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + def padding1d_circular_ref(inp, pad): + r""" input: + [[[0., 1., 2.], + [3., 4., 5.]]] + pad: (1, 2) + output: + [[[2., 0., 1., 2., 0., 1.], + [5., 3., 4., 5., 3., 4.]]] + """ + return torch.cat([inp[:, :, -pad[0]:], inp, inp[:, :, :pad[1]]], dim=2) + + return [ + ModuleInput( + constructor_input=FunctionInput(1), + forward_input=FunctionInput(make_input((3, 4))), + 
reference_fn=no_batch_dim_reference_fn + ), + ModuleInput( + constructor_input=FunctionInput((1, 2)), + forward_input=FunctionInput(make_input((1, 2, 3))), + reference_fn=lambda m, p, i: padding1d_circular_ref(i, m.padding), + ), + ModuleInput( + constructor_input=FunctionInput((3, 1)), + forward_input=FunctionInput(make_input((1, 2, 3))), + reference_fn=lambda m, p, i: padding1d_circular_ref(i, m.padding), + ), + ModuleInput( + constructor_input=FunctionInput((3, 3)), + forward_input=FunctionInput(make_input((1, 2, 3))), + reference_fn=lambda m, p, i: padding1d_circular_ref(i, m.padding), + ), + ] + +def module_inputs_torch_nn_CircularPad2d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + def padding2d_circular_ref(inp, pad): + r"""input: + [[[[0., 1., 2], + [3., 4., 5.]]]] + pad: (1, 2, 2, 1) + output: + [[[[2., 0., 1., 2., 0., 1.], + [5., 3., 4., 5., 3., 4.], + [2., 0., 1., 2., 0., 1.], + [5., 3., 4., 5., 3., 4.], + [2., 0., 1., 2., 0., 1.]]]] + """ + inp = torch.cat([inp[:, :, -pad[2]:], inp, inp[:, :, :pad[3]]], dim=2) + return torch.cat([inp[:, :, :, -pad[0]:], inp, inp[:, :, :, :pad[1]]], dim=3) + + return [ + ModuleInput( + constructor_input=FunctionInput(1), + forward_input=FunctionInput(make_input((3, 4, 5))), + reference_fn=no_batch_dim_reference_fn, + ), + ModuleInput( + constructor_input=FunctionInput((1, 2, 2, 1)), + forward_input=FunctionInput(make_input((1, 1, 2, 3))), + reference_fn=lambda m, p, i: padding2d_circular_ref(i, m.padding), + ), + ModuleInput( + constructor_input=FunctionInput((2, 3, 2, 2)), + forward_input=FunctionInput(make_input((1, 1, 2, 3))), + reference_fn=lambda m, p, i: padding2d_circular_ref(i, m.padding), + ), + ModuleInput( + constructor_input=FunctionInput((3, 3, 3, 1)), + forward_input=FunctionInput(make_input((1, 1, 3, 3))), + reference_fn=lambda m, p, i: padding2d_circular_ref(i, m.padding), + ), + ] + +def 
module_inputs_torch_nn_CircularPad3d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + + def padding3d_circular_ref(inp, pad): + r"""input: + [[[[[ 0., 1., 2.], + [ 3., 4., 5.]], + [[ 6., 7., 8.], + [ 9., 10., 11.]]]]] + pad: (1, 2, 2, 1, 1, 2) + output: [[[[[ 8., 6., 7., 8., 6., 7.], + [11., 9., 10., 11., 9., 10.], + [ 8., 6., 7., 8., 6., 7.], + [11., 9., 10., 11., 9., 10.], + [ 8., 6., 7., 8., 6., 7.]], + + [[ 2., 0., 1., 2., 0., 1.], + [ 5., 3., 4., 5., 3., 4.], + [ 2., 0., 1., 2., 0., 1.], + [ 5., 3., 4., 5., 3., 4.], + [ 2., 0., 1., 2., 0., 1.]], + + [[ 8., 6., 7., 8., 6., 7.], + [11., 9., 10., 11., 9., 10.], + [ 8., 6., 7., 8., 6., 7.], + [11., 9., 10., 11., 9., 10.], + [ 8., 6., 7., 8., 6., 7.]], + + [[ 2., 0., 1., 2., 0., 1.], + [ 5., 3., 4., 5., 3., 4.], + [ 2., 0., 1., 2., 0., 1.], + [ 5., 3., 4., 5., 3., 4.], + [ 2., 0., 1., 2., 0., 1.]], + + [[ 8., 6., 7., 8., 6., 7.], + [11., 9., 10., 11., 9., 10.], + [ 8., 6., 7., 8., 6., 7.], + [11., 9., 10., 11., 9., 10.], + [ 8., 6., 7., 8., 6., 7.]]]]] + """ + inp = torch.cat([inp[:, :, -pad[4]:], inp, inp[:, :, :pad[5]]], dim=2) + inp = torch.cat([inp[:, :, :, -pad[2]:], inp, inp[:, :, :, :pad[3]]], dim=3) + return torch.cat([inp[:, :, :, :, -pad[0]:], inp, inp[:, :, :, :, :pad[1]]], dim=4) + + return [ + ModuleInput( + constructor_input=FunctionInput(1), + forward_input=FunctionInput(make_input((3, 4, 5, 6))), + reference_fn=no_batch_dim_reference_fn, + ), + ModuleInput( + constructor_input=FunctionInput((1, 2, 1, 2, 1, 2)), + forward_input=FunctionInput(make_input((1, 1, 2, 2, 3))), + reference_fn=lambda m, p, i: padding3d_circular_ref(i, m.padding) + ), + ModuleInput( + constructor_input=FunctionInput((3, 2, 2, 1, 1, 2)), + forward_input=FunctionInput(make_input((1, 1, 2, 2, 3))), + reference_fn=lambda m, p, i: padding3d_circular_ref(i, m.padding) + ), + ModuleInput( + 
constructor_input=FunctionInput((3, 3, 2, 1, 2, 2)), + forward_input=FunctionInput(make_input((1, 1, 2, 2, 3))), + reference_fn=lambda m, p, i: padding3d_circular_ref(i, m.padding) + ), + ] + + +# All these operators share similar issues on cuDNN and MIOpen +rnn_gru_lstm_module_info_decorators = ( + # RuntimeError: Batching rule not implemented for aten::_cudnn_rnn_backward. + # We could not generate a fallback + DecorateInfo( + unittest.expectedFailure, "TestModule", "test_grad", + active_if=(TEST_CUDNN and not TEST_WITH_ROCM), device_type='cuda' + ), + # NotImplementedError: the derivative for '_cudnn_rnn_backward' is not implemented. + # Double backwards is not supported for CuDNN RNNs due to limitations in the CuDNN API + DecorateInfo( + unittest.expectedFailure, "TestModule", "test_gradgrad", + active_if=(TEST_CUDNN and not TEST_WITH_ROCM), device_type='cuda' + ), + # CUDNN GRU doesn't accept non-contiguous hx + DecorateInfo( + unittest.expectedFailure, "TestModule", "test_non_contiguous_tensors", + active_if=(TEST_CUDNN and not TEST_WITH_ROCM), device_type='cuda' + ), + # MIOPEN GRU doesn't accept non-contiguous hx (this is dispatched to miopen only for float). + DecorateInfo( + unittest.expectedFailure, "TestModule", "test_non_contiguous_tensors", + active_if=(TEST_CUDNN and TEST_WITH_ROCM), dtypes=(torch.float,), device_type='cuda' + ), + DecorateInfo( + skipCUDAVersionIn([(11, 7)]), "TestExpandedWeightModule", "test_module", + device_type='cuda' + ), + DecorateInfo( + skipCUDAVersionIn([(11, 7)]), "TestDecomp", "test_rnn_decomp_module", + device_type='cuda' + ) +) + +# Start of module error inputs functions. 
+ +def module_error_inputs_torch_nn_RNN_GRU_Cell(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + samples = [ + ErrorModuleInput( + ModuleInput( + constructor_input=FunctionInput(10, 20), + forward_input=FunctionInput(make_input(3, 11), make_input(3, 20)), + ), + error_on=ModuleErrorEnum.FORWARD_ERROR, + error_type=RuntimeError, + error_regex="input has inconsistent input_size: got 11 expected 10" + ), + ErrorModuleInput( + ModuleInput( + constructor_input=FunctionInput(10, 20), + forward_input=FunctionInput(make_input(3, 10), make_input(3, 21)), + ), + error_on=ModuleErrorEnum.FORWARD_ERROR, + error_type=RuntimeError, + error_regex="hidden0 has inconsistent hidden_size: got 21, expected 20" + ), + ErrorModuleInput( + ModuleInput( + constructor_input=FunctionInput(10, 20), + forward_input=FunctionInput(make_input(3, 10), make_input(5, 20)), + ), + error_on=ModuleErrorEnum.FORWARD_ERROR, + error_type=RuntimeError, + error_regex="Input batch size 3 doesn't match hidden0 batch size 5" + ), + ErrorModuleInput( + ModuleInput( + constructor_input=FunctionInput(10, 20), + forward_input=FunctionInput(make_input(3, 10), make_input(3, 1, 1, 20)), + ), + error_on=ModuleErrorEnum.FORWARD_ERROR, + error_type=ValueError, + error_regex="Expected hidden to be 1D or 2D, got 4D instead" + ), + ErrorModuleInput( + ModuleInput( + constructor_input=FunctionInput(10, 20, 'relu'), + forward_input=FunctionInput(make_input(3, 10), make_input(3, 21)), + ), + error_on=ModuleErrorEnum.FORWARD_ERROR, + error_type=RuntimeError, + error_regex="hidden0 has inconsistent hidden_size: got 21, expected 20" + ), + ErrorModuleInput( + ModuleInput( + constructor_input=FunctionInput(10, 20, 'tanh'), + forward_input=FunctionInput(make_input(3, 10), make_input(3, 21)), + ), + error_on=ModuleErrorEnum.FORWARD_ERROR, + error_type=RuntimeError, + error_regex="hidden0 has inconsistent hidden_size: got 
21, expected 20" + ), + ] + return samples + +def module_error_inputs_torch_nn_LSTMCell(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + samples = [ + ErrorModuleInput( + ModuleInput( + constructor_input=FunctionInput(10, 20), + forward_input=FunctionInput(make_input(3, 11), (make_input(3, 20), make_input(3, 20))), + ), + error_on=ModuleErrorEnum.FORWARD_ERROR, + error_type=RuntimeError, + error_regex="input has inconsistent input_size: got 11 expected 10" + ), + ErrorModuleInput( + ModuleInput( + constructor_input=FunctionInput(10, 20), + forward_input=FunctionInput(make_input(3, 10), (make_input(3, 21), make_input(3, 21))), + ), + error_on=ModuleErrorEnum.FORWARD_ERROR, + error_type=RuntimeError, + error_regex="hidden0 has inconsistent hidden_size: got 21, expected 20" + ), + ErrorModuleInput( + ModuleInput( + constructor_input=FunctionInput(10, 20), + forward_input=FunctionInput(make_input(3, 10), (make_input(5, 20), make_input(5, 20))), + ), + error_on=ModuleErrorEnum.FORWARD_ERROR, + error_type=RuntimeError, + error_regex="Input batch size 3 doesn't match hidden0 batch size 5" + ), + ErrorModuleInput( + ModuleInput( + constructor_input=FunctionInput(10, 20), + forward_input=FunctionInput(make_input(3, 10), (make_input(3, 1, 1, 20), make_input(3, 1, 1, 20))), + ), + error_on=ModuleErrorEnum.FORWARD_ERROR, + error_type=ValueError, + error_regex="Expected hx\\[0\\] to be 1D or 2D, got 4D instead" + ), + ] + return samples + + +def module_error_inputs_torch_nn_RNN_GRU(module_info, device, dtype, requires_grad, training, **kwargs): + samples = [ + ErrorModuleInput( + ModuleInput(constructor_input=FunctionInput(10, 0, 1)), + error_on=ModuleErrorEnum.CONSTRUCTION_ERROR, + error_type=ValueError, + error_regex="hidden_size must be greater than zero" + ), + ErrorModuleInput( + ModuleInput(constructor_input=FunctionInput(10, 10, 0)), + 
error_on=ModuleErrorEnum.CONSTRUCTION_ERROR, + error_type=ValueError, + error_regex="num_layers must be greater than zero" + ), + ] + return samples + +def module_error_inputs_torch_nn_Pad1d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + is_constant = kwargs.get('is_constant', False) + + return [ + ErrorModuleInput( + ModuleInput( + constructor_input=FunctionInput(1, 3) if is_constant else FunctionInput(3), + forward_input=FunctionInput(make_input((2, 3, 4, 5))), + ), + error_on=ModuleErrorEnum.FORWARD_ERROR, + error_type=ValueError, + error_regex=r"expected 2D or 3D input \(got 4D input\)", + + ), + ] + +def module_error_inputs_torch_nn_Pad2d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + is_constant = kwargs.get('is_constant', False) + + return [ + ErrorModuleInput( + ModuleInput( + constructor_input=FunctionInput(1, 3) if is_constant else FunctionInput(3), + forward_input=FunctionInput(make_input((2, 3))), + ), + error_on=ModuleErrorEnum.FORWARD_ERROR, + error_type=ValueError, + error_regex=r"expected 3D or 4D input \(got 2D input\)", + + ), + ] + +def module_error_inputs_torch_nn_Pad3d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + is_constant = kwargs.get('is_constant', False) + + return [ + ErrorModuleInput( + ModuleInput( + constructor_input=FunctionInput(1, 3) if is_constant else FunctionInput(3), + forward_input=FunctionInput(make_input((2, 3))), + ), + error_on=ModuleErrorEnum.FORWARD_ERROR, + error_type=ValueError, + error_regex=r"expected 4D or 5D input \(got 2D input\)", + + ), + ] + + +_macos15_or_newer = torch.backends.mps.is_available() and torch.backends.mps.is_macos_or_newer(15, 0) + + +# Database of ModuleInfo 
entries in alphabetical order. +module_db: List[ModuleInfo] = [ + ModuleInfo(torch.nn.AdaptiveAvgPool1d, + module_inputs_func=module_inputs_torch_nn_AdaptiveAvgPool1d, + skips=( + # Fails on MPS backend if input/output sizes are not divisible + DecorateInfo(skipMPS),) + ), + ModuleInfo(torch.nn.AdaptiveAvgPool2d, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + module_inputs_func=module_inputs_torch_nn_AdaptiveAvgPool2d, + skips=( + # Fails on MPS backend if input/output sizes are not divisible + DecorateInfo(skipMPS), + # Fails on backward check if output size is 1x1 + DecorateInfo( + unittest.expectedFailure, + 'TestModule', + 'test_memory_format', + active_if=operator.itemgetter('training'), + ),) + ), + ModuleInfo(torch.nn.AdaptiveAvgPool3d, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + module_inputs_func=module_inputs_torch_nn_AdaptiveAvgPool3d, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), + # not supported on MPS backend + DecorateInfo(skipMPS),) + ), + ModuleInfo(torch.nn.AdaptiveMaxPool1d, + module_inputs_func=module_inputs_torch_nn_AdaptiveMaxPool1d, + ), + ModuleInfo(torch.nn.AdaptiveMaxPool2d, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + module_inputs_func=module_inputs_torch_nn_AdaptiveMaxPool2d, + ), + ModuleInfo(torch.nn.AdaptiveMaxPool3d, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + module_inputs_func=module_inputs_torch_nn_AdaptiveMaxPool3d, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), + # not supported on MPS backend + DecorateInfo(skipMPS),) + ), + ModuleInfo(torch.nn.AvgPool1d, + module_inputs_func=module_inputs_torch_nn_AvgPool1d, + ), + ModuleInfo(torch.nn.AvgPool2d, + module_inputs_func=module_inputs_torch_nn_AvgPool2d, + skips=( + # The difference between channels last backward and + # channels first backward of AvgPool2d on CUDA is too large + # See https://github.com/pytorch/pytorch/issues/107201 + DecorateInfo( + unittest.expectedFailure, + 
'TestModule', + 'test_memory_format', + active_if=operator.itemgetter('training'), + device_type='cuda', + ), + # error: input types 'tensor' and 'tensor<15x10xf16>' are not broadcast compatible + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float16]),), + ), + ModuleInfo(torch.nn.AvgPool3d, + module_inputs_func=module_inputs_torch_nn_AvgPool3d, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + skips=( + # No channels_last support for AvgPool1d as it does not take 4D inputs + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), + # not supported on MPS backend + DecorateInfo(skipMPS),) + ), + ModuleInfo(torch.nn.BatchNorm1d, + train_and_eval_differ=True, + module_inputs_func=module_inputs_torch_nn_BatchNorm1d, + skips=( + # tracking here rather than in the list in test_aotdispatch.py as eval mode passes + # RuntimeError: tried to get Double out of SymInt + DecorateInfo( + unittest.expectedFailure, 'TestEagerFusionModuleInfo', + 'test_aot_autograd_symbolic_module_exhaustive', + active_if=operator.itemgetter('training') + ), + # torch._subclasses.fake_tensor.DataDependentOutputException: aten._local_scalar_dense.default + DecorateInfo( + unittest.expectedFailure, 'TestEagerFusionModuleInfo', + 'test_aot_autograd_module_exhaustive', + active_if=operator.itemgetter('training') + )) + ), + ModuleInfo(torch.nn.BatchNorm2d, + train_and_eval_differ=True, + module_inputs_func=module_inputs_torch_nn_BatchNorm2d, + skips=( + # See https://github.com/pytorch/pytorch/issues/134580 + DecorateInfo(expectedFailureMPS, 'TestModule', 'test_memory_format', active_if=operator.itemgetter('training')), + # tracking here rather than in the list in test_aotdispatch.py as eval mode passes + # RuntimeError: tried to get Double out of SymInt + DecorateInfo( + unittest.expectedFailure, 'TestEagerFusionModuleInfo', + 'test_aot_autograd_symbolic_module_exhaustive', + active_if=operator.itemgetter('training') + ), + # 
torch._subclasses.fake_tensor.DataDependentOutputException: aten._local_scalar_dense.default + DecorateInfo( + unittest.expectedFailure, 'TestEagerFusionModuleInfo', + 'test_aot_autograd_module_exhaustive', + active_if=operator.itemgetter('training') + ),) + ), + ModuleInfo(torch.nn.BatchNorm3d, + train_and_eval_differ=True, + module_inputs_func=module_inputs_torch_nn_BatchNorm3d, + skips=( + # not supported on MPS backend + DecorateInfo(skipMPS), + # tracking here rather than in the list in test_aotdispatch.py as eval mode passes + # RuntimeError: tried to get Double out of SymInt + DecorateInfo( + unittest.expectedFailure, 'TestEagerFusionModuleInfo', + 'test_aot_autograd_symbolic_module_exhaustive', + active_if=operator.itemgetter('training') + ), + # torch._subclasses.fake_tensor.DataDependentOutputException: aten._local_scalar_dense.default + DecorateInfo( + unittest.expectedFailure, 'TestEagerFusionModuleInfo', + 'test_aot_autograd_module_exhaustive', + active_if=operator.itemgetter('training') + ),) + ), + ModuleInfo(torch.nn.CELU, + module_inputs_func=module_inputs_torch_nn_CELU, + # not MPS specific, will be xfailed for all devices in next PR + skips=( + DecorateInfo(unittest.expectedFailure, 'TestModule', 'test_check_inplace', + device_type='mps', dtypes=[torch.float16]),) + ), + ModuleInfo(torch.nn.Conv1d, + module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=1, lazy=False), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + module_memformat_affects_out=True, + skips=( + # channels_last support on cuda requires cudnn >= 7603 + DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=7603), 'TestModule', 'test_memory_format'), + # Failure on ROCM for float32 issue #70125 + DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]), + # See #119108: MPSNDArrayConvolutionA14.mm:3976: failed assertion `destination datatype must be fp32' + # xfail does not work due to Fatal Python error: Aborted + DecorateInfo(skipIfMps, 
"TestModule", "test_memory_format", + device_type='mps', dtypes=[torch.float16]), + DecorateInfo(skipIfMps, "TestModule", "test_non_contiguous_tensors", + device_type='mps', dtypes=[torch.float16]), + ), + decorators=( + DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'), + )), + ModuleInfo(torch.nn.Conv2d, + module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=2, lazy=False), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + module_memformat_affects_out=True, + skips=( + # channels_last support on cuda requires cudnn >= 7603 + DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=7603), 'TestModule', 'test_memory_format'), + # Failure on ROCM for float32 issue #70125 + DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]), + # This was wrongly being skipped before and needs investigation. + # See https://github.com/pytorch/pytorch/issues/80247 + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", + device_type='cuda', dtypes=[torch.float64]), + # Fails with channels last test on MPS backend + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", + device_type='mps', dtypes=[torch.float32]), + # See #119108: MPSNDArrayConvolutionA14.mm:3976: failed assertion `destination datatype must be fp32' + # xfail does not work due to Fatal Python error: Aborted + DecorateInfo(skipIfMps, "TestModule", "test_memory_format", + device_type='mps', dtypes=[torch.float16]), + DecorateInfo(skipIfMps, "TestModule", "test_non_contiguous_tensors", + device_type='mps', dtypes=[torch.float16]), + ), + decorators=( + DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'), + )), + ModuleInfo(torch.nn.Conv3d, + module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=3, lazy=False), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + module_memformat_affects_out=True, + skips=( + # channels_last support on cuda requires cudnn >= 8005 + 
DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=8005), 'TestModule', 'test_memory_format'), + # Failure on ROCM for float32 issue #70125 + DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]), + # Conv3d is not supported on MPS backend + DecorateInfo(skipMPS), + # This was wrongly being skipped before and needs investigation. + # See https://github.com/pytorch/pytorch/issues/80247 + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format"), + ), + decorators=( + DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'), + )), + ModuleInfo(torch.nn.ConvTranspose1d, + module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=1, lazy=False, transposed=True), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + module_memformat_affects_out=True, + dtypes=floating_and_complex_types_and(torch.chalf), + skips=( + # channels_last support on cuda requires cudnn >= 7603 + DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=7603), 'TestModule', 'test_memory_format'), + # Failure on ROCM for float32 issue #70125 + DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]), + # Not implmented for chalf on CPU + DecorateInfo(unittest.expectedFailure, 'TestModule', 'test_cpu_gpu_parity', + dtypes=(torch.chalf,), device_type='cuda'), + # See #119108: MPSNDArrayConvolutionA14.mm:3976: failed assertion `destination datatype must be fp32' + # xfail does not work due to Fatal Python error: Aborted + DecorateInfo(skipIfMps, "TestModule", "test_memory_format", + device_type='mps', dtypes=[torch.float16]), + DecorateInfo(skipIfMps, "TestModule", "test_non_contiguous_tensors", + device_type='mps', dtypes=[torch.float16]),), + decorators=( + DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'), + DecorateInfo(precisionOverride({torch.chalf: 5e-03}), 'TestModule', 'test_memory_format'), + )), + ModuleInfo(torch.nn.ConvTranspose2d, + 
module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=2, lazy=False, transposed=True), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + module_memformat_affects_out=True, + dtypes=floating_and_complex_types_and(torch.chalf), + skips=( + # channels_last support on cuda requires cudnn >= 7603 + DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=7603), 'TestModule', 'test_memory_format'), + # Failure on ROCM for float32 issue #70125 + DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]), + # Fails on backward check because ViewAsRealBackward apply contiguous for grad + DecorateInfo(unittest.expectedFailure, 'TestModule', 'test_memory_format', + dtypes=(torch.complex32, torch.complex64, torch.complex128)), + # This was wrongly being skipped before and needs investigation. + # See https://github.com/pytorch/pytorch/issues/80247 + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", device_type='cuda', + dtypes=[torch.float64, torch.complex128]), + # Fails with channels last test on MPS backend + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", + device_type='mps', dtypes=[torch.float32]), + # Not implemented for chalf on CPU + DecorateInfo(unittest.expectedFailure, 'TestModule', 'test_cpu_gpu_parity', + dtypes=(torch.chalf,), device_type='cuda'), + # See #119108: MPSNDArrayConvolutionA14.mm:3976: failed assertion `destination datatype must be fp32' + # xfail does not work due to Fatal Python error: Aborted + DecorateInfo(skipIfMps, "TestModule", "test_memory_format", + device_type='mps', dtypes=[torch.float16]), + DecorateInfo(skipIfMps, "TestModule", "test_non_contiguous_tensors", + device_type='mps', dtypes=[torch.float16]), + ), + decorators=( + DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'), + DecorateInfo(precisionOverride({torch.chalf: 5e-03}), 'TestModule', 'test_memory_format'), + )), + ModuleInfo(torch.nn.ConvTranspose3d, + 
module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=3, lazy=False, transposed=True), + dtypes=floating_and_complex_types_and(torch.chalf), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + module_memformat_affects_out=True, + skips=( + # channels_last support on cuda requires cudnn >= 8005 + DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=8005), 'TestModule', 'test_memory_format'), + # Failure on ROCM for float32 issue #70125 + DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]), + # ConvTranspose3d is not supported on MPS backend + DecorateInfo(skipMPS), + # This was wrongly being skipped before and needs investigation. + # See https://github.com/pytorch/pytorch/issues/80247 + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format"), + # These fail only on ROCm + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", device_type='cuda', + dtypes=[torch.complex32, torch.complex64], active_if=TEST_WITH_ROCM), + # Not implmented for chalf on CPU + DecorateInfo(unittest.expectedFailure, 'TestModule', 'test_cpu_gpu_parity', + dtypes=(torch.chalf,), device_type='cuda'), + ), + decorators=( + DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'), + DecorateInfo(precisionOverride({torch.complex64: 1e-04}), 'TestModule', 'test_cpu_gpu_parity'), + DecorateInfo(precisionOverride({torch.chalf: 5e-03}), 'TestModule', 'test_memory_format'), + )), + ModuleInfo(torch.nn.CosineEmbeddingLoss, + module_inputs_func=module_inputs_torch_nn_CosineEmbeddingLoss, + skips=( + # No channels_last support for loss functions. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) + ), + ModuleInfo(torch.nn.ELU, + module_inputs_func=module_inputs_torch_nn_ELU, + # not MPS specific, will be xfailed for all devices in next PR + skips=( + DecorateInfo(unittest.expectedFailure, 'TestModule', 'test_check_inplace', + device_type='mps', dtypes=[torch.float16]),) + ), + ModuleInfo(torch.nn.FractionalMaxPool2d, + module_inputs_func=module_inputs_torch_nn_FractionalMaxPool2d, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + skips=( + # not supported on MPS backend + DecorateInfo(skipMPS), + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) + ), + ModuleInfo(torch.nn.FractionalMaxPool3d, + module_inputs_func=module_inputs_torch_nn_FractionalMaxPool3d, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + skips=( + # not supported on MPS backend + DecorateInfo(skipMPS), + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) + ), + ModuleInfo(torch.nn.L1Loss, + module_inputs_func=module_inputs_torch_nn_L1Loss, + skips=( + # No channels_last support for loss functions. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) + ), + ModuleInfo(torch.nn.SmoothL1Loss, + module_inputs_func=module_inputs_torch_nn_SmoothL1Loss, + skips=( + # No channels_last support for loss functions. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), + # See #119108: input types 'tensor' and 'tensor<15x10xf16>' are not broadcast compatible + DecorateInfo(skipIfMps, 'TestModule', 'test_non_contiguous_tensors', dtypes=[torch.float16]),) + ), + ModuleInfo(torch.nn.LazyConv1d, + module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=1, lazy=True), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + module_memformat_affects_out=True, + skips=( + # channels_last support on cuda requires cudnn >= 7603 + DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=7603), 'TestModule', 'test_memory_format'), + # Failure on ROCM for float32 issue #70125 + DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]), + # Lazy modules don't currently play well with ModuleInfo tests on the meta device. + # See https://github.com/pytorch/pytorch/issues/70505 for more info. + DecorateInfo(skipMeta), + # See #119108: MPSNDArrayConvolutionA14.mm:3976: failed assertion `destination datatype must be fp32' + # xfail does not work due to Fatal Python error: Aborted + DecorateInfo(skipIfMps, "TestModule", "test_memory_format", + device_type='mps', dtypes=[torch.float16]), + DecorateInfo(skipIfMps, "TestModule", "test_non_contiguous_tensors", + device_type='mps', dtypes=[torch.float16]), + ), + decorators=( + DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'), + )), + ModuleInfo(torch.nn.LazyConv2d, + module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=2, lazy=True), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + module_memformat_affects_out=True, + skips=( + # channels_last support on cuda requires cudnn >= 7603 + DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=7603), 'TestModule', 'test_memory_format'), + # Failure on ROCM for float32 issue #70125 + DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]), + # Lazy modules don't currently play well 
with ModuleInfo tests on the meta device. + # See https://github.com/pytorch/pytorch/issues/70505 for more info. + DecorateInfo(skipMeta), + # This was wrongly being skipped before and needs investigation. + # See https://github.com/pytorch/pytorch/issues/80247 + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", + device_type='cuda', dtypes=[torch.float64]), + # Fails with channels last test on MPS backend + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", + device_type='mps', dtypes=[torch.float32]), + # See #119108: MPSNDArrayConvolutionA14.mm:3976: failed assertion `destination datatype must be fp32' + # xfail does not work due to Fatal Python error: Aborted + DecorateInfo(skipIfMps, "TestModule", "test_memory_format", + device_type='mps', dtypes=[torch.float16]), + DecorateInfo(skipIfMps, "TestModule", "test_non_contiguous_tensors", + device_type='mps', dtypes=[torch.float16]), + ), + decorators=( + DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'), + )), + ModuleInfo(torch.nn.LazyConv3d, + module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=3, lazy=True), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + module_memformat_affects_out=True, + skips=( + # channels_last support on cuda requires cudnn >= 8005 + DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=8005), 'TestModule', 'test_memory_format'), + # Failure on ROCM for float32 issue #70125 + DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]), + # Lazy modules don't currently play well with ModuleInfo tests on the meta device. + # See https://github.com/pytorch/pytorch/issues/70505 for more info. + DecorateInfo(skipMeta), + # LazyConv3d is not supported on MPS backend + DecorateInfo(skipMPS), + # This was wrongly being skipped before and needs investigation. 
+ # See https://github.com/pytorch/pytorch/issues/80247 + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format"), + ), + decorators=( + DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'), + )), + ModuleInfo(torch.nn.LazyConvTranspose1d, + module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=1, lazy=True, transposed=True), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + module_memformat_affects_out=True, + skips=( + # channels_last support on cuda requires cudnn >= 7603 + DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=7603), 'TestModule', 'test_memory_format'), + # Failure on ROCM for float32 issue #70125 + DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]), + # Lazy modules don't currently play well with ModuleInfo tests on the meta device. + # See https://github.com/pytorch/pytorch/issues/70505 for more info. + DecorateInfo(skipMeta), + # See #119108: MPSNDArrayConvolutionA14.mm:3976: failed assertion `destination datatype must be fp32' + # xfail does not work due to Fatal Python error: Aborted + DecorateInfo(skipIfMps, "TestModule", "test_memory_format", + device_type='mps', dtypes=[torch.float16]), + DecorateInfo(skipIfMps, "TestModule", "test_non_contiguous_tensors", + device_type='mps', dtypes=[torch.float16]), + ), + decorators=( + DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'), + )), + ModuleInfo(torch.nn.LazyConvTranspose2d, + module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=2, lazy=True, transposed=True), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + module_memformat_affects_out=True, + skips=( + # channels_last support on cuda requires cudnn >= 7603 + DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=7603), 'TestModule', 'test_memory_format'), + # Failure on ROCM for float32 issue #70125 + DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]), + # Lazy 
modules don't currently play well with ModuleInfo tests on the meta device. + # See https://github.com/pytorch/pytorch/issues/70505 for more info. + DecorateInfo(skipMeta), + # This was wrongly being skipped before and needs investigation. + # See https://github.com/pytorch/pytorch/issues/80247 + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", device_type='cuda', + dtypes=[torch.float64]), + # Fails with channels last test on MPS backend + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", + device_type='mps', dtypes=[torch.float32]), + # See #119108: MPSNDArrayConvolutionA14.mm:3976: failed assertion `destination datatype must be fp32' + # xfail does not work due to Fatal Python error: Aborted + DecorateInfo(skipIfMps, "TestModule", "test_memory_format", + device_type='mps', dtypes=[torch.float16]), + DecorateInfo(skipIfMps, "TestModule", "test_non_contiguous_tensors", + device_type='mps', dtypes=[torch.float16]), + ), + decorators=( + DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'), + )), + ModuleInfo(torch.nn.LazyConvTranspose3d, + module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=3, lazy=True, transposed=True), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + module_memformat_affects_out=True, + skips=( + # channels_last support on cuda requires cudnn >= 8005 + DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=8005), 'TestModule', 'test_memory_format'), + # Failure on ROCM for float32 issue #70125 + DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]), + # Lazy modules don't currently play well with ModuleInfo tests on the meta device. + # See https://github.com/pytorch/pytorch/issues/70505 for more info. + DecorateInfo(skipMeta), + # LazyConvTranspose3d is not supported on MPS backend + DecorateInfo(skipMPS), + # This was wrongly being skipped before and needs investigation. 
+ # See https://github.com/pytorch/pytorch/issues/80247 + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format"), + ), + decorators=( + DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'), + )), + ModuleInfo(torch.nn.Linear, + module_inputs_func=module_inputs_torch_nn_Linear, + skips=( + # No channels_last support for Linear currently. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) + ), + ModuleInfo(torch.nn.Bilinear, + module_inputs_func=module_inputs_torch_nn_Bilinear, + decorators=[ + DecorateInfo( + toleranceOverride({ + torch.float32: tol(atol=1e-4, rtol=1e-4), + torch.float64: tol(atol=1e-4, rtol=1e-4)}), + 'TestModule', 'test_forward', device_type='cpu'), + ], + skips=( + # No channels_last support for Bilinear currently. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), + # See #119108: tolerance issue + DecorateInfo(unittest.expectedFailure, "TestModule", "test_forward", + device_type='mps', dtypes=[torch.float16]),) + ), + ModuleInfo(torch.nn.LPPool1d, + module_inputs_func=module_inputs_torch_nn_LPPool1d, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_grad'), + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_gradgrad'),) + ), + ModuleInfo(torch.nn.LPPool2d, + module_inputs_func=module_inputs_torch_nn_LPPool2d, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_grad'), + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_gradgrad'), + # Fails on backward check on MPS + # See https://github.com/pytorch/pytorch/issues/107214 + DecorateInfo( + unittest.expectedFailure, + 'TestModule', + 'test_memory_format', + active_if=operator.itemgetter('training'), + device_type='mps', + ),) + ), + ModuleInfo(torch.nn.LPPool3d, + module_inputs_func=module_inputs_torch_nn_LPPool3d, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_grad'), + 
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_gradgrad'), + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), + DecorateInfo(skipIfMps),) + ), + ModuleInfo(torch.nn.MaxPool1d, + module_inputs_func=module_inputs_torch_nn_MaxPool1d, + ), + ModuleInfo(torch.nn.MaxPool2d, + module_inputs_func=module_inputs_torch_nn_MaxPool2d, + ), + ModuleInfo(torch.nn.MaxPool3d, + module_inputs_func=module_inputs_torch_nn_MaxPool3d, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + skips=( + # not supported on MPS backend + DecorateInfo(skipMPS),) + ), + ModuleInfo(torch.nn.KLDivLoss, + module_inputs_func=module_inputs_torch_nn_KLDivLoss, + skips=( + # No channels_last support for loss functions. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), + # https://github.com/pytorch/pytorch/issues/115588 + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_cpu_gpu_parity'), + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_grad'), + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_gradgrad'),) + ), + ModuleInfo(torch.nn.MSELoss, + module_inputs_func=module_inputs_torch_nn_MSELoss, + skips=( + # No channels_last support for loss functions. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), + # See #119108: input types 'tensor' and 'tensor<15x10xf16>' are not broadcast compatible + DecorateInfo(skipIfMps, 'TestModule', 'test_non_contiguous_tensors', dtypes=[torch.float16]), + # See #119108: tolerance issue + DecorateInfo(unittest.expectedFailure, "TestModule", "test_forward", + device_type='mps', dtypes=[torch.float16]),) + ), + ModuleInfo(torch.nn.MarginRankingLoss, + module_inputs_func=module_inputs_torch_nn_MarginRankingLoss, + skips=( + # No channels_last support for loss functions. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) + ), + ModuleInfo(torch.nn.MultiLabelMarginLoss, + module_inputs_func=module_inputs_torch_nn_MultiLabelMarginLoss, + skips=( + # No channels_last support for loss functions. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), + # 'aten::multilabel_margin_loss_forward' is not currently implemented for the MPS device. + DecorateInfo(skipIfMps, 'TestModule'), + # derivative for aten::multilabel_margin_loss_backward is not implemented + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_gradgrad'),) + ), + ModuleInfo(torch.nn.MultiMarginLoss, + module_inputs_func=module_inputs_torch_nn_MultiMarginLoss, + skips=( + # No channels_last support for loss functions. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), + # 'aten::multi_margin_loss' is not currently implemented for the MPS device. + DecorateInfo(skipIfMps, 'TestModule'), + # RuntimeError: derivative for aten::multi_margin_loss_backward is not implemented + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_gradgrad'),) + ), + ModuleInfo(torch.nn.SoftMarginLoss, + module_inputs_func=module_inputs_torch_nn_SoftMarginLoss, + skips=( + # No channels_last support for loss functions. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), + # See #119108: tolerance issue + DecorateInfo(unittest.expectedFailure, "TestModule", "test_forward", + device_type='mps', dtypes=[torch.float16]),) + ), + ModuleInfo(torch.nn.MultiLabelSoftMarginLoss, + module_inputs_func=module_inputs_torch_nn_MultiLabelSoftMarginLoss, + skips=( + # No channels_last support for loss functions. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) + ), + ModuleInfo(torch.nn.NLLLoss, + module_inputs_func=module_inputs_torch_nn_NLLLoss, + skips=( + # No channels_last support for loss functions. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), + # See #119108: tolerance issue + DecorateInfo(unittest.expectedFailure, "TestModule", "test_forward", + device_type='mps', dtypes=[torch.float16]),) + ), + ModuleInfo(torch.nn.GaussianNLLLoss, + module_inputs_func=module_inputs_torch_nn_GaussianNLLLoss, + skips=( + # No channels_last support for loss functions. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),)), + ModuleInfo(torch.nn.PoissonNLLLoss, + module_inputs_func=module_inputs_torch_nn_PoissonNLLLoss, + skips=( + # No channels_last support for loss functions. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),)), + ModuleInfo(torch.nn.HingeEmbeddingLoss, + module_inputs_func=module_inputs_torch_nn_HingeEmbeddingLoss, + skips=( + # No channels_last support for loss functions. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) + ), + ModuleInfo(torch.nn.HuberLoss, + module_inputs_func=module_inputs_torch_nn_HuberLoss, + skips=( + # No channels_last support for loss functions. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), + # See #119108: seemingly incorrect output dtype + DecorateInfo(unittest.expectedFailure, "TestModule", "test_forward", + device_type='mps', dtypes=[torch.float16]),) + ), + ModuleInfo(torch.nn.BCELoss, + module_inputs_func=module_inputs_torch_nn_BCELoss, + skips=( + # No channels_last support for loss functions. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), + # error: input types 'tensor' and 'tensor<15x10xf16>' are not broadcast compatible + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float16]),) + ), + ModuleInfo(torch.nn.BCEWithLogitsLoss, + module_inputs_func=module_inputs_torch_nn_BCEWithLogitsLoss, + skips=( + # No channels_last support for loss functions. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), + # see #119108: tolerance issue + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float16]),) + ), + ModuleInfo(torch.nn.CrossEntropyLoss, + module_inputs_func=module_inputs_torch_nn_CrossEntropyLoss, + dtypes=get_all_fp_dtypes(include_half=True, include_bfloat16=False), + decorators=( + # No channels_last support for loss functions. + DecorateInfo(unittest.expectedFailure, 'TestModule', 'test_memory_format'), + DecorateInfo(toleranceOverride({torch.float16: tol(atol=3e-2, rtol=1e-3)}), "TestModule", + "test_forward", dtypes=[torch.float16], device_type='cpu'), + DecorateInfo(unittest.expectedFailure, "TestModule", "test_cpu_gpu_parity", dtypes=[torch.float16], + device_type='cuda'),), + ), + ModuleInfo(torch.nn.CTCLoss, + module_inputs_func=module_inputs_torch_nn_CTCLoss, + skips=( + # No channels_last support for loss functions. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), + # The operator aten::_ctc_loss is not currently implemented for the MPS device. 
+ DecorateInfo(skipIfMps, 'TestModule'), + # derivative for aten::_ctc_loss_backward is not implemented + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_grad'), + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_gradgrad'), + # https://github.com/pytorch/pytorch/issues/115585 + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_non_contiguous_tensors'),) + ), + ModuleInfo(torch.nn.GELU, + module_inputs_func=module_inputs_torch_nn_GELU, + skips=( + # See #119108: tolerance issue + DecorateInfo(unittest.expectedFailure, "TestModule", "test_forward", + device_type='mps', dtypes=[torch.float16]),) + ), + ModuleInfo(torch.nn.GLU, + module_inputs_func=module_inputs_torch_nn_GLU, + ), + ModuleInfo(torch.nn.GroupNorm, + module_inputs_func=module_inputs_torch_nn_GroupNorm, + dtypes=get_all_fp_dtypes(include_bfloat16=True, include_half=True), + skips=( + # Tracking at https://github.com/pytorch/pytorch/issues/98089 + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_cpu_gpu_parity'), + DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-4, rtol=1e-4)}), + 'TestModule', 'test_memory_format', device_type='cpu'), + # No channels_last support for GroupNorm currently. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format', device_type='cuda'), + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format', device_type='mps'), + DecorateInfo(unittest.skip("Skipped!"), "TestModule", "test_grad", + active_if=TEST_WITH_ROCM, device_type='cuda'),) + ), + ModuleInfo(torch.nn.Hardshrink, + module_inputs_func=module_inputs_torch_nn_Hardshrink, + skips=( + # not supported on MPS backend + DecorateInfo(skipMPS),), + ), + ModuleInfo(torch.nn.Hardswish, + module_inputs_func=module_inputs_torch_nn_Hardswish, + skips=None if _macos15_or_newer else ( + # Fails on backward check on MPS + # See https://github.com/pytorch/pytorch/issues/107214 + DecorateInfo( + unittest.expectedFailure, + 'TestModule', + 'test_memory_format', + active_if=operator.itemgetter('training'), + device_type='mps', + ),), + supports_gradgrad=False), + ModuleInfo(torch.nn.Hardtanh, + module_inputs_func=module_inputs_torch_nn_Hardtanh, + ), + ModuleInfo(torch.nn.InstanceNorm1d, + module_inputs_func=partial(module_inputs_torch_nn_InstanceNormNd, N=1), + train_and_eval_differ=True, + skips=( + # No channels_last support for InstanceNorm1d currently. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) + ), + ModuleInfo(torch.nn.InstanceNorm2d, + module_inputs_func=partial(module_inputs_torch_nn_InstanceNormNd, N=2), + train_and_eval_differ=True, + skips=( + # No channels_last support for InstanceNorm2d currently. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) + ), + ModuleInfo(torch.nn.InstanceNorm3d, + module_inputs_func=partial(module_inputs_torch_nn_InstanceNormNd, N=3), + train_and_eval_differ=True, + skips=( + # not supported on MPS backend + DecorateInfo(skipMPS), + # No channels_last support for InstanceNorm3d currently. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) + ), + ModuleInfo(torch.nn.LocalResponseNorm, + module_inputs_func=module_inputs_torch_nn_LocalResponseNorm, + skips=( + # uses avg_pool3d which is not supported on MPS backend + DecorateInfo(skipMPS),) + ), + ModuleInfo(torch.nn.LayerNorm, + module_inputs_func=module_inputs_torch_nn_LayerNorm, + skips=( + # No channels_last support for LayerNorm currently. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) + ), + ModuleInfo(torch.nn.RMSNorm, + module_inputs_func=module_inputs_torch_nn_RMSNorm, + ), + # TransformerEncoder takes the same inputs as TransformerEncoderLayer + ModuleInfo(torch.nn.TransformerEncoder, + train_and_eval_differ=True, + module_inputs_func=module_inputs_torch_nn_TransformerEncoder, + decorators=[ + # Not implemented for SDPA backward derivative + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_gradgrad', + device_type='cpu'), + ], + skips=( + # No channels_last support for TransformerEncoderLayer currently. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), + # Doesn't support device / dtype kwargs directly because it is just a + # container of TransformerEncoderLayers. + DecorateInfo(unittest.expectedFailure, 'TestModule', 'test_factory_kwargs'),) + ), + ModuleInfo(torch.nn.TransformerEncoderLayer, + train_and_eval_differ=True, + module_inputs_func=module_inputs_torch_nn_TransformerEncoderLayer, + decorators=[ + DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-4, rtol=1e-4)}), + 'TestModule', 'test_non_contiguous_tensors', + device_type='cpu', active_if=IS_WINDOWS), + # Not implemented for SDPA backward derivative + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_gradgrad', + device_type='cpu'), + ], + skips=( + # No channels_last support for TransformerEncoderLayer currently. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) + ), + ModuleInfo(torch.nn.TransformerDecoderLayer, + module_inputs_func=module_inputs_torch_nn_TransformerDecoderLayer, + decorators=[ + # Not implemented for SDPA backward derivative + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_gradgrad', + device_type='cpu'), + ], + skips=( + # No channels_last support for TransformerDecoderLayer currently. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) + ), + ModuleInfo(torch.nn.Transformer, + module_inputs_func=module_inputs_torch_nn_Transformer, + decorators=[ + # Not implemented for SDPA backward derivative + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_gradgrad', + device_type='cpu'), + ], + skips=( + # No channels_last support for Transformer currently. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) + ), + ModuleInfo(torch.nn.MultiheadAttention, + train_and_eval_differ=True, + module_inputs_func=module_inputs_torch_nn_MultiheadAttention, + skips=( + # No channels_last support for MultiheadAttention currently. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) + ), + ModuleInfo(torch.nn.Embedding, + module_inputs_func=module_inputs_torch_nn_Embedding, + decorators=[ + DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-4, rtol=1e-4)}), + 'TestModule', 'test_non_contiguous_tensors', + device_type='mps')], + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) + ), + ModuleInfo(torch.nn.ReLU, + module_inputs_func=module_inputs_torch_nn_ReLU, + skips=None if _macos15_or_newer else ( + # Fails on backward check on MPS + # See https://github.com/pytorch/pytorch/issues/107214 + DecorateInfo( + unittest.expectedFailure, + 'TestModule', + 'test_memory_format', + active_if=operator.itemgetter('training'), + device_type='mps', + ),) + ), + ModuleInfo(torch.nn.LeakyReLU, + module_inputs_func=module_inputs_torch_nn_LeakyReLU, + ), + ModuleInfo(torch.nn.ReLU6, + module_inputs_func=module_inputs_torch_nn_ReLU6, + skips=( + # test fails on MPS backend and is being investigated. + # See https://github.com/pytorch/pytorch/issues/100914 + DecorateInfo(skipMPS),) + ), + ModuleInfo(torch.nn.PReLU, + module_inputs_func=module_inputs_torch_nn_PReLU, + skips=( + # test fails on MPS backend and is being investigated. 
+ # See https://github.com/pytorch/pytorch/issues/100914 + DecorateInfo(skipMPS),) + ), + ModuleInfo(torch.nn.RNNCell, + module_inputs_func=partial(module_inputs_torch_nn_RNN_GRU_Cell, is_rnn=True), + module_error_inputs_func=module_error_inputs_torch_nn_RNN_GRU_Cell, + ), + ModuleInfo(torch.nn.GRUCell, + module_inputs_func=module_inputs_torch_nn_RNN_GRU_Cell, + module_error_inputs_func=module_error_inputs_torch_nn_RNN_GRU_Cell, + ), + ModuleInfo(torch.nn.LSTMCell, + module_inputs_func=module_inputs_torch_nn_LSTMCell, + module_error_inputs_func=module_error_inputs_torch_nn_LSTMCell, + ), + ModuleInfo(torch.nn.Sigmoid, + module_inputs_func=module_inputs_torch_nn_Sigmoid, + skips=None if _macos15_or_newer else ( + # Fails on backward check on MPS + # See https://github.com/pytorch/pytorch/issues/107214 + DecorateInfo( + unittest.expectedFailure, + 'TestModule', + 'test_memory_format', + active_if=operator.itemgetter('training'), + device_type='mps', + ),) + ), + ModuleInfo(torch.nn.LogSigmoid, + module_inputs_func=module_inputs_torch_nn_LogSigmoid, + skips=( + # See #119108: tolerance issue + DecorateInfo(unittest.expectedFailure, "TestModule", "test_forward", device_type='mps', dtypes=[torch.float16]),) + ), + ModuleInfo(torch.nn.SiLU, + module_inputs_func=module_inputs_torch_nn_SiLU, + ), + ModuleInfo(torch.nn.Softmax, + module_inputs_func=module_inputs_torch_nn_Softmax, + ), + ModuleInfo(torch.nn.Softmax2d, + module_inputs_func=module_inputs_torch_nn_Softmax2d, + skips=( + # no channels last support for Softmax2d currently + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), + # See #119108: tolerance issue + DecorateInfo(unittest.expectedFailure, "TestModule", "test_forward", device_type='mps', dtypes=[torch.float16]),) + ), + ModuleInfo(torch.nn.LogSoftmax, + module_inputs_func=module_inputs_torch_nn_LogSoftmax, + skips=( + # no channels last support for LogSoftmax currently + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 
'test_memory_format'), + # See #119108: inf nan error + DecorateInfo(unittest.expectedFailure, "TestModule", "test_forward", device_type='mps', dtypes=[torch.float16]),) + ), + ModuleInfo(torch.nn.Softmin, + module_inputs_func=module_inputs_torch_nn_Softmin, + skips=( + # no channels last support for Softmin currently + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) + ), + ModuleInfo(torch.nn.Softplus, + module_inputs_func=module_inputs_torch_nn_Softplus, + skips=( + # test fails on MPS backend and is being investigated. + # See https://github.com/pytorch/pytorch/issues/100914 + DecorateInfo(skipMPS),) + ), + ModuleInfo(torch.nn.Softshrink, + module_inputs_func=module_inputs_torch_nn_Softshrink, + skips=( + # not supported on MPS backend + DecorateInfo(skipMPS),) + ), + ModuleInfo(torch.nn.Softsign, + module_inputs_func=module_inputs_torch_nn_Softsign, + ), + ModuleInfo(torch.nn.Tanh, + module_inputs_func=module_inputs_torch_nn_Tanh, + skips=None if _macos15_or_newer else ( + # Fails on backward check on MPS + # See https://github.com/pytorch/pytorch/issues/107214 + DecorateInfo( + unittest.expectedFailure, + 'TestModule', + 'test_memory_format', + active_if=operator.itemgetter('training'), + device_type='mps', + ),) + ), + ModuleInfo(torch.nn.Tanhshrink, + module_inputs_func=module_inputs_torch_nn_Tanhshrink, + skips=None if _macos15_or_newer else ( + # Fails on backward check on MPS + # See https://github.com/pytorch/pytorch/issues/107214 + DecorateInfo( + unittest.expectedFailure, + 'TestModule', + 'test_memory_format', + active_if=operator.itemgetter('training'), + device_type='mps', + ),) + ), + ModuleInfo(torch.nn.Threshold, + module_inputs_func=module_inputs_torch_nn_Threshold, + skips=( + # test fails on MPS backend and is being investigated. 
+ # See https://github.com/pytorch/pytorch/issues/100914 + DecorateInfo(skipMPS),) + ), + ModuleInfo(torch.nn.Mish, + module_inputs_func=module_inputs_torch_nn_Mish, + skips=( + # not supported on MPS backend + DecorateInfo(skipMPS),) + ), + ModuleInfo(torch.nn.RNN, + train_and_eval_differ=True, + module_inputs_func=partial(module_inputs_torch_nn_RNN_GRU, is_rnn=True), + module_error_inputs_func=module_error_inputs_torch_nn_RNN_GRU, + decorators=rnn_gru_lstm_module_info_decorators + ), + ModuleInfo(torch.nn.GRU, + train_and_eval_differ=True, + module_inputs_func=partial(module_inputs_torch_nn_RNN_GRU, is_rnn=False), + module_error_inputs_func=module_error_inputs_torch_nn_RNN_GRU, + decorators=rnn_gru_lstm_module_info_decorators), + ModuleInfo(torch.nn.LSTM, + train_and_eval_differ=True, + module_inputs_func=module_inputs_torch_nn_LSTM, + module_error_inputs_func=module_error_inputs_torch_nn_RNN_GRU, + skips=( + # LSTM with projections is not currently supported with MPS + DecorateInfo(skipMPS),), + decorators=rnn_gru_lstm_module_info_decorators), + ModuleInfo(torch.nn.ReflectionPad1d, + module_inputs_func=module_inputs_torch_nn_ReflectionPad1d, + ), + ModuleInfo(torch.nn.ReflectionPad2d, + module_inputs_func=module_inputs_torch_nn_ReflectionPad2d, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format', + device_type='cuda'), + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format', + device_type='mps'),) + ), + ModuleInfo(torch.nn.ReflectionPad3d, + module_inputs_func=module_inputs_torch_nn_ReflectionPad3d, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format', + device_type='cuda'), + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format', + device_type='mps'),) + ), + ModuleInfo(torch.nn.ReplicationPad1d, + module_inputs_func=module_inputs_torch_nn_ReplicationPad1d, + ), 
+ ModuleInfo(torch.nn.ReplicationPad2d, + module_inputs_func=module_inputs_torch_nn_ReplicationPad2d, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format', + device_type='cuda'), + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format', + device_type='mps'),) + ), + ModuleInfo(torch.nn.ReplicationPad3d, + module_inputs_func=module_inputs_torch_nn_ReplicationPad3d, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format', + device_type='cuda'), + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format', + device_type='mps'),) + ), + ModuleInfo(torch.nn.SELU, + module_inputs_func=module_inputs_torch_nn_SELU, + skips=( + # test fails on MPS backend and is being investigated. + # See https://github.com/pytorch/pytorch/issues/100914 + DecorateInfo(skipMPS),) + ), + ModuleInfo(torch.nn.ZeroPad1d, + module_inputs_func=module_inputs_torch_nn_ZeroPad1d, + ), + ModuleInfo(torch.nn.ZeroPad2d, + module_inputs_func=module_inputs_torch_nn_ZeroPad2d, + skips=( + # Fails with channels last test on MPS backend + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", device_type='mps'),) + ), + ModuleInfo(torch.nn.ZeroPad3d, + module_inputs_func=module_inputs_torch_nn_ZeroPad3d, + skips=( + # Fails with channels last test on MPS backend + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", device_type='mps'),) + ), + ModuleInfo(torch.nn.CircularPad1d, + module_inputs_func=module_inputs_torch_nn_CircularPad1d, + module_error_inputs_func=module_error_inputs_torch_nn_Pad1d, + ), + ModuleInfo(torch.nn.CircularPad2d, + module_inputs_func=module_inputs_torch_nn_CircularPad2d, + module_error_inputs_func=module_error_inputs_torch_nn_Pad2d, + ), + ModuleInfo(torch.nn.CircularPad3d, + module_inputs_func=module_inputs_torch_nn_CircularPad3d, + 
module_error_inputs_func=module_error_inputs_torch_nn_Pad3d, + skips=( + # Fails with channels last test on MPS backend + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format"),) + ), + ModuleInfo(torch.nn.ConstantPad1d, + module_inputs_func=module_inputs_torch_nn_ConstantPad1d, + ), + ModuleInfo(torch.nn.ConstantPad2d, + module_inputs_func=module_inputs_torch_nn_ConstantPad2d, + skips=( + # Fails with channels last test on MPS backend + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", device_type='mps'),) + ), + ModuleInfo(torch.nn.ConstantPad3d, + module_inputs_func=module_inputs_torch_nn_ConstantPad3d, + skips=( + # Fails with channels last test on MPS backend + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", device_type='mps'),) + ) +] diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/common_quantized.py b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/common_quantized.py new file mode 100644 index 0000000000000000000000000000000000000000..3bd7b827dde32f0d26380bdbe5766c1eece3da6e --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/common_quantized.py @@ -0,0 +1,227 @@ +# mypy: ignore-errors + +r"""Importing this file includes common utility methods for checking quantized +tensors and modules. +""" +import numpy as np +import torch +from contextlib import contextmanager +from torch.testing._internal.common_utils import TEST_WITH_ASAN, TEST_WITH_TSAN, TEST_WITH_UBSAN, IS_PPC, IS_MACOS, IS_WINDOWS + +supported_qengines = torch.backends.quantized.supported_engines +supported_qengines.remove('none') +# Note: We currently do not run QNNPACK tests on WINDOWS and MACOS as it is flaky. Issue #29326 +# QNNPACK is not supported on PPC +# QNNPACK throws ASAN heap-buffer-overflow error. 
+if 'qnnpack' in supported_qengines and any([IS_PPC, TEST_WITH_ASAN, TEST_WITH_TSAN, TEST_WITH_UBSAN, IS_MACOS, IS_WINDOWS]): + supported_qengines.remove('qnnpack') + +def _conv_output_shape(input_size, kernel_size, padding, stride, dilation, + output_padding=0): + """Computes the output shape given convolution parameters.""" + return np.floor((input_size + 2 * padding - kernel_size - (kernel_size - 1) + * (dilation - 1)) / stride) + 2 * output_padding + 1 + +# Quantization references +def _quantize(x, scale, zero_point, qmin=None, qmax=None, dtype=np.uint8): + """Quantizes a numpy array.""" + if qmin is None: + qmin = np.iinfo(dtype).min + if qmax is None: + qmax = np.iinfo(dtype).max + qx = np.round(x / scale + zero_point).astype(np.int64) + qx = np.clip(qx, qmin, qmax) + qx = qx.astype(dtype) + return qx + + +def _dequantize(qx, scale, zero_point): + """Dequantizes a numpy array.""" + x = (qx.astype(float) - zero_point) * scale + return x + + +def _requantize(x, multiplier, zero_point, qmin=0, qmax=255, qtype=np.uint8): + """Requantizes a numpy array, i.e., intermediate int32 or int16 values are + converted back to given type""" + qx = (x * multiplier).round() + zero_point + qx = np.clip(qx, qmin, qmax).astype(qtype) + return qx + +def _calculate_dynamic_qparams(X, dtype, reduce_range=False, qscheme=torch.per_tensor_affine): + """Calculate the dynamic quantization parameters (scale, zero_point) + according to the min and max element of the tensor""" + assert qscheme in (torch.per_tensor_affine, torch.per_tensor_symmetric) + if qscheme == torch.per_tensor_symmetric: + assert dtype == torch.qint8 + if isinstance(X, torch.Tensor): + X = X.numpy() + if dtype == torch.qint8: + if reduce_range: + qmin, qmax = -64, 63 + else: + qmin, qmax = -128, 127 + else: # dtype == torch.quint8 + if reduce_range: + qmin, qmax = 0, 127 + else: + qmin, qmax = 0, 255 + min_val = X.min() + max_val = X.max() + is_symmetric = (qscheme == torch.per_tensor_symmetric) + if min_val == 
max_val: + scale = 1.0 + zero_point = 0 + else: + if is_symmetric: + max_val = max(max_val, -min_val) + min_val = -max_val + scale = (max_val - min_val) / (qmax - qmin) + scale = max(scale, np.finfo(np.float32).eps) + zero_point = 0 + else: + max_val = max(max_val, 0.0) + min_val = min(min_val, 0.0) + scale = (max_val - min_val) / (qmax - qmin) + scale = max(scale, np.finfo(np.float32).eps) + zero_point = qmin - round(min_val / scale) + zero_point = max(qmin, zero_point) + zero_point = min(qmax, zero_point) + return [float(scale), int(zero_point)] + +def _calculate_dynamic_per_channel_qparams(X, dtype): + """Calculate the dynamic quantization parameters (scale, zero_point) + according to the min and max element of the tensor""" + if isinstance(X, torch.Tensor): + X = X.numpy() + qmin, qmax = torch.iinfo(dtype).min, torch.iinfo(dtype).max + n_levels = qmax - qmin + scale = np.zeros(X.shape[0], dtype=np.float64) + zero_point = np.zeros(X.shape[0], dtype=np.int64) + for i in range(zero_point.shape[0]): + min_val = X.min() + max_val = X.max() + if min_val == max_val: + scale[i] = 1.0 + zero_point[i] = 0 + else: + max_val = max(max_val, 0.0) + min_val = min(min_val, 0.0) + scale[i] = (max_val - min_val) / n_levels + scale[i] = max(scale[i], np.finfo(np.float32).eps) + zero_point[i] = qmin - round(min_val / scale[i]) + zero_point[i] = max(qmin, zero_point[i]) + zero_point[i] = min(qmax, zero_point[i]) + + return scale, zero_point + +def _snr(x, x_hat): + """Calculates the signal to noise ratio and returns the signal and noise + power, as well as the SNR in dB. + If the input is a list/tuple this function is called recursively on each + element. The result will have the same nested structure as the inputs. + + Args: + x, x_hat: Either a tensor or a nested list/tuple of tensors. 
+ Returns: + signal, noise, SNR(in dB): Either floats or a nested list of floats + """ + if isinstance(x, (list, tuple)): + assert len(x) == len(x_hat) + res = [] + for idx in range(len(x)): + res.append(_snr(x[idx], x_hat[idx])) + return res + if x_hat.is_quantized: + x_hat = x_hat.dequantize() + if x.is_quantized: + x = x.dequantize() + noise = (x - x_hat).norm() + if noise == 0: + return 0.0, float('inf'), float('inf') + signal = x.norm() + snr = signal / noise + snr_db = 20 * snr.log10() + return signal, noise, snr_db + +@contextmanager +def override_quantized_engine(qengine): + previous = torch.backends.quantized.engine + torch.backends.quantized.engine = qengine + try: + yield + finally: + torch.backends.quantized.engine = previous + +@contextmanager +def override_cpu_allocator_for_qnnpack(qengine_is_qnnpack): + try: + if qengine_is_qnnpack: + torch._C._set_default_mobile_cpu_allocator() + yield + finally: + if qengine_is_qnnpack: + torch._C._unset_default_mobile_cpu_allocator() + +# TODO: Update all quantization tests to use this decorator. +# Currently for some of the tests it seems to have inconsistent params +# for fbgemm vs qnnpack. +def override_qengines(qfunction): + def test_fn(*args, **kwargs): + for qengine in supported_qengines: + with override_quantized_engine(qengine): + # qfunction should not return anything. 
+ qfunction(*args, **kwargs) + return test_fn + +def qengine_is_fbgemm(): + return torch.backends.quantized.engine == 'fbgemm' +def qengine_is_qnnpack(): + return torch.backends.quantized.engine == 'qnnpack' +def qengine_is_onednn(): + return torch.backends.quantized.engine == 'onednn' +def qengine_is_x86(): + return torch.backends.quantized.engine == 'x86' + +# Helper function used to simulate per-channel fake-quant against any axis +def _permute_to_axis_zero(X, axis): + new_axis_list = list(range(X.dim())) + new_axis_list[axis] = 0 + new_axis_list[0] = axis + y = X.permute(tuple(new_axis_list)) + return y, new_axis_list + +# Reference method for fake quantize +# Note: because scale/zero_point are left as float in the actual kernel, this mimics how fake_quant works for float16/64 +def _fake_quantize_per_channel_affine_reference(X, per_channel_scale, per_channel_zero_point, axis, quant_min, quant_max): + dtype = X.dtype + X, permute_axis_list = _permute_to_axis_zero(X.to(torch.float32), axis) + res = torch.zeros_like(X) + + for i in range(X.size()[0]): + res[i] = (torch.clamp(torch.round(X[i] * (1.0 / per_channel_scale[i]) + + per_channel_zero_point[i]), quant_min, quant_max) - per_channel_zero_point[i]) * per_channel_scale[i] + + out = res.permute(tuple(permute_axis_list)) + return out.to(dtype) + +# Reference method for the gradient of the fake quantize operator +# Note: because scale/zero_point are left as float in the actual kernel, this mimics how fake_quant works for float16/64 +def _fake_quantize_per_channel_affine_grad_reference(dY, X, per_channel_scale, per_channel_zero_point, axis, quant_min, quant_max): + dtype = X.dtype + X, permute_axis_list = _permute_to_axis_zero(X.to(torch.float32), axis) + Xq = torch.zeros_like(X) + for i in range(X.size()[0]): + Xq[i] = torch.round(X[i] * (1.0 / per_channel_scale[i]) + per_channel_zero_point[i]) + Xq = Xq.permute(tuple(permute_axis_list)) + mask = (Xq >= quant_min) * (Xq <= quant_max) + res = torch.zeros_like(dY) 
+ res[mask] = dY[mask] + return res.to(dtype) + +def to_tensor(X, device): + if not isinstance(X, torch.Tensor): + X = torch.tensor(X) + else: + X = X.clone().detach() + return X.to(device=torch.device(device), dtype=torch.float32) diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/composite_compliance.py b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/composite_compliance.py new file mode 100644 index 0000000000000000000000000000000000000000..b3c3bd4a130e08d24bb10845ff5b8774f731ff1b --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/composite_compliance.py @@ -0,0 +1,581 @@ +# mypy: ignore-errors + +import torch +from torch import Tensor +import itertools + +from torch.utils._python_dispatch import TorchDispatchMode +from torch.utils._pytree import tree_map, tree_flatten, tree_unflatten +from torch.utils import _pytree as pytree +from functools import partial +from torch.utils._mode_utils import no_dispatch, all_same_mode +import torch.autograd.forward_ad as fwAD +from typing import Callable +import re + + +def check_attr_consistency(wrapper_tensor, metadata_name, metadata_accessor): + elem = wrapper_tensor.elem + metadata_wrapper_tensor = metadata_accessor(wrapper_tensor) + metadata_elem = metadata_accessor(elem) + if metadata_wrapper_tensor == metadata_elem: + return + raise RuntimeError( + f"This operator is not Composite Compliant: the " + f"{metadata_name} of the tensor was modified directly without " + f"going through the PyTorch dispatcher.") + +def check_metadata_consistency(wrapper_tensor, CCT): + # CCT: CompositeCompliantTensor class which is generated using generate_cct + if not isinstance(wrapper_tensor, CCT): + return + things_to_check = { + 'shape': Tensor.size, + 'dtype': lambda x: x.dtype, + 'device': lambda x: x.device, + 'numel': Tensor.numel, + 'stride': Tensor.stride, + 'storage_offset': Tensor.storage_offset, + } + for metadata_name, metadata_accessor in 
things_to_check.items(): + check_attr_consistency(wrapper_tensor, metadata_name, metadata_accessor) + +def is_view_fn(func): + return func.overloadpacket.__name__ in { + 'as_strided', + 'detach', + 'diagonal', + 'expand', + 'expand_as', + 'movedim', + 'narrow', + 'permute', + 'select', + 'squeeze', + 'transpose', + 't', + 'real', + 'imag', + 'view_as_real', + 'view_as_complex', + 'unflatten', + 'unfold', + 'unsqueeze', + 'view', + 'view_as', + 'unbind', + 'split', + 'split_with_sizes', + 'vsplit', + 'hsplit', + 'tensor_split', + 'chunk', + 'swapaxes', + 'slice', + '_reshape_alias', + '_unsafe_view', + '_conj', + 'alias', + } + +# manually populated from native_functions that have inplace_view: True. +# In the future we will probably be able to grab that list directly +def is_inplace_view_fn(func): + return func.overloadpacket.__name__ in { + 'as_strided_', + 'detach_', + 'squeeze_', + 'swapaxes_', + 'swapdims_', + 't_', + 'transpose_', + 'unsqueeze_', + } + + +# Introspection please save us +def is_inplace(func): + name = func.overloadpacket.__name__ + if re.match('__i.+__', name): + return True + if re.match('__.+__', name): + return False + return name[-1] == '_' + + +def generate_cct_and_mode(autograd_view_consistency=True): + # This function returns a new class CompositeCompliantTensor + # The two arguments control the behaviour described below. + + # autograd_view_consistency: + # If True, alias result using `set_` if func returns a view + # (See Note [Alias Result]). + # Since Forward AD doesn't work with `set_` + # we disable it by setting alias to False. 
+ + class CompositeCompliantTensor(torch.Tensor): + elem: torch.Tensor + + __slots__ = ['elem'] + + @staticmethod + def __new__(cls, elem, mode, *args, **kwargs): + assert type(elem) is not cls, \ + "Wrapping a CompositeCompliantTensor in a CompositeCompliantTensor is not supported" + + # The storage of CompositeCompliantTensor should never be used directly + # by a Composite operation; if the Composite + # operator attempts to read from the storage without dispatching then it'll + # raise a RuntimeError due to it being a meta storage. + r = torch.Tensor._make_wrapper_subclass( # type: ignore[attr-defined] + cls, elem.size(), + dtype=elem.dtype, layout=elem.layout, + device=elem.device, requires_grad=elem.requires_grad, + strides=elem.stride(), storage_offset=elem.storage_offset()) + + if elem.requires_grad: + # CompositeCompliantTensor steals the "requires_grad"-ness. + # Why a new copy of `elem`? Because sometimes OpInfo shares inputs between tests... + tmp = torch.empty_strided(elem.shape, elem.stride(), dtype=elem.dtype, + device=elem.device, layout=elem.layout, + requires_grad=False) + tmp.copy_(elem.detach()) + r.elem = tmp + else: + r.elem = elem + + assert r.stride() == r.elem.stride() + + # Propagate conjugate bits to the wrapper tensor + # Ref: https://github.com/albanD/subclass_zoo/issues/24 + # Ref: https://github.com/albanD/subclass_zoo/issues/21 + torch._C._set_conj(r, r.elem.is_conj()) + torch._C._set_neg(r, r.elem.is_neg()) + + r.mode = mode + return r + + def __repr__(self): + return f"CompositeCompliantTensor({self.elem})" + + @classmethod + def __torch_dispatch__(cls, func, types, args=(), kwargs=None): + all_args = pytree.arg_tree_leaves(*args, **(kwargs or {})) + modes = tuple(e.mode for e in all_args if isinstance(e, CompositeCompliantTensor)) + if not all_same_mode(modes): + raise RuntimeError("Multiple CompositeCompliantTensorModes NYI") + with modes[0]: + return func(*args, **kwargs) + + class 
CompositeCompliantTensorMode(TorchDispatchMode): + def __torch_dispatch__(self, func, types, args=(), kwargs=None): + def unwrap(e): + return e.elem if isinstance(e, CompositeCompliantTensor) else e + + def wrap(e): + return CompositeCompliantTensor(e, self) if isinstance(e, torch.Tensor) else e + + if func == torch.ops.aten._local_scalar_dense.default: + raise RuntimeError( + ".item() is not allowed to be called inside of composite " + "functions in the PyTorch library because not all backends " + "and/or Tensor subclasses (e.g. vmap, ProxyTensor) support them.") + + if func.overloadpacket.__name__ in ('set_', 'resize_'): + raise RuntimeError( + f"{func.__name__} is not allowed to be called inside of " + f"Composite operators.") + + if is_inplace(func): + # NB: We are making an assumption that if the function is in-place, + # then the first argument is being written to. Introspection please save us! + mutated_argument = args[0] + if not isinstance(mutated_argument, CompositeCompliantTensor) and \ + any(isinstance(a, CompositeCompliantTensor) for a in args[1:]): + raise RuntimeError( + 'Not composite compliant: performing in-place operation ' + f'{func.__name__} where the Tensor being written to is ' + 'regular Tensor but the other tensors are Tensor Subclasses. ' + 'Please try to avoid this in-place operation.') + + unwrapped_args = tree_map(unwrap, args) + unwrapped_kwargs = tree_map(unwrap, kwargs) + unwrapped_rs = func(*unwrapped_args, **unwrapped_kwargs) + rs = tree_map(wrap, unwrapped_rs) + + if is_view_fn(func) and autograd_view_consistency: + # Note [Alias Result] + # Autograd asserts that for B = A.view_fn(...), B and A's storages + # are the same. Here we try to make B alias A to avoid those asserts. + # See https://github.com/pytorch/pytorch/issues/65339 for more information + # about the issue. + with no_dispatch(): + # Idea: this is a weird way of getting a storage that aliases the input. + # This is a workaround for #65339. + # 1. 
under no_dispatch, all of the wrapper tensors look like regular + # tensors with special storage (the storage is nullptr and + # advertises CPU/CUDA device. + # 2. we run func, which ends up running the view operation + # 3. All view operations reuse the input's storage and return + # result Tensor(s) with new sizes/strides/offset that alias + # the input. + # 4. we set the storage (and sizes/strides/offset) of the wrapper + # tensor results to be that of the tensors that alias the input + result = func(*args, **kwargs) + if isinstance(result, (tuple, list)): + for a, b in zip(rs, result): + a.set_(b) + else: + rs.set_(result) + + # Some operations are allowed to in-place modify the metadata of the + # inputs. The only ones are the "inplace view functions"; when we + # run into these, we manually modify the metadata of the input. + with no_dispatch(): + if is_inplace_view_fn(func): + func(*args, **kwargs) + + # For each CompositeCompliantTensor t, we check that t and t.elem + # have consistent metadata. If they don't have consistent metadata, + # that means the operator did something fishy. 
+ check = partial(check_metadata_consistency, CCT=CompositeCompliantTensor) + pytree.tree_map_(check, args) + pytree.tree_map_(check, kwargs) + pytree.tree_map_(check, rs) + return rs + + return CompositeCompliantTensor, CompositeCompliantTensorMode() + +def is_tensorlist(lst): + if not isinstance(lst, list) and not isinstance(lst, tuple): + return False + if len(lst) == 0: + return False + all_tensors = all(isinstance(elt, torch.Tensor) for elt in lst) + if all_tensors: + return True + exists_one_tensor = all(isinstance(elt, torch.Tensor) for elt in lst) + if exists_one_tensor: + raise RuntimeError('This test assumes that PyTorch APIs cannot take ' + 'mixed lists of Tensor and other things') + return False + + +def maybe_map(fn, should_map, arg): + return fn(arg) if should_map else arg + + +def wrap(arg, CCT, cct_mode): + # CCT: CompositeCompliantTensor class which is generated using generate_cct_and_mode + if isinstance(arg, torch.Tensor): + return CCT(arg, cct_mode) + if is_tensorlist(arg): + return [CCT(a, cct_mode) for a in arg] + raise RuntimeError("wrap assumes that the input can be wrapped") + + +# Given a list of flat arguments, some of which may be Tensors, return all +# possible ways some of the arguments could be CompositeCompliantTensors (CCT). +# For example, given Tensors A, B, C and flat_args = [A, 1, B], +# We would return the following 4 options: +# [CCT(A), 1, CCT(B)] +# [CCT(A), 1, B] +# [A, 1, CCT(B)] +# [A, 1, B] +# NB: Yes, this is exponential. No, we don't care too much because PyTorch ops +# don't accept that many input Tensors. 
+def generate_subclass_choices(flat_args, CCT, cct_mode): + # CCT: CompositeCompliantTensor class which is generated using generate_cct_and_mode + is_tensor_likes = [isinstance(arg, torch.Tensor) or is_tensorlist(arg) for arg in flat_args] + subclass_options = [[False, True] if is_tensor_like else [False] for is_tensor_like in is_tensor_likes] + + for which_args_are_wrapped in itertools.product(*subclass_options): + + result = [maybe_map(partial(wrap, CCT=CCT, cct_mode=cct_mode), should_wrap_arg, arg) + for should_wrap_arg, arg in zip(which_args_are_wrapped, flat_args)] + yield result, which_args_are_wrapped + + +# For an operation f(*args, **kwargs), each Tensor argument may either be +# a regular Tensor or a Tensor Subclass. This iterator iterates through +# all of those options. +def generate_subclass_choices_args_kwargs(args, kwargs, CCT, cct_mode): + # CCT: CompositeCompliantTensor class which is generated using generate_cct_and_mode + flat_kwargs, spec = tree_flatten(kwargs) + flat_args_kwargs = list(args) + list(flat_kwargs) + for choice, debug_metadata in generate_subclass_choices(flat_args_kwargs, CCT, cct_mode): + new_args = choice[:len(args)] + new_kwargs = tree_unflatten(choice[len(args):], spec) + which_args_are_wrapped = debug_metadata[:len(args)] + which_kwargs_are_wrapped = tree_unflatten(debug_metadata[len(args):], spec) + yield new_args, new_kwargs, which_args_are_wrapped, which_kwargs_are_wrapped + + +def raise_composite_compliance_error(err, additional_info=''): + raise RuntimeError( + "Composite compliance check failed with " + "the above error.\n" + f"{additional_info}" + "If you are adding an OpInfo of an " + "existing operator, please feel free to skip this test " + "because the problem was pre-existing and file an issue. " + "Otherwise, if you added a new operator, please read " + "through the Composite Compliance section in " + "aten/src/ATen/native/README.md for how to resolve this. 
" + ) from err + + +# This test checks ALL possible permutations of calling `op` with arguments +# that are individually either a regular Tensor or a Tensor subclass. +# +# The general strategy is to wrap some Tensor args and kwargs in +# CompositeCompliantTensor wrappers and call the operation. + +# If some composite operation does any non-compliant behavior, +# CompositeCompliantTensor will raise an error. +def check_all_permutations(op, args, kwargs, assert_equal_fn): + CCT, cct_mode = generate_cct_and_mode() + expected = op(*args, **kwargs) + for choice in generate_subclass_choices_args_kwargs(args, kwargs, CCT, cct_mode): + new_args, new_kwargs, which_args_are_wrapped, which_kwargs_are_wrapped = choice + + try: + actual = op(*new_args, **new_kwargs) + # NOTE: [What errors are Composite Compliance trying to catch?] + # + # There's two things we want to catch: + # - errors that would raise within the torch_dispatch impl + # - data_ptr accesses + # The first is easy to filter for (we could make the error a different + # error class), the second is always going to be a RuntimeError due to + # how it is implemented (if you try to access the data_ptr of thex + # wrapper Tensor, it raises you some internal RuntimeError). + # + # So the most general thing to catch here was RuntimeError. If you + # are here and debugging why your test failed, it's plausible that + # the operator itself is broken and that there are other tests failing. + except RuntimeError as err: + raise_composite_compliance_error( + err, + f"- wrapped_args: {which_args_are_wrapped}\n" + f"- wrapped_kwargs: {which_kwargs_are_wrapped}\n" + ) + + def unwrap(e): + return e.elem if isinstance(e, CCT) else e + + assert_equal_fn(tree_map(unwrap, actual), expected) + +# Checks via the usage of torch dispatch mode certain anti-patterns that +# are not composite compliant. +# +# In particular, the anti-pattern we are trying to prevent is a user +# creating an empty tensor and then resize_-ing it. 
Torch Dispatch Mode helps +# here because all factory functions will create tensors that are +# CompositeCompliantTensor. +# +# The general strategy is to wrap all Tensor args and kwargs in +# CompositeCompliantTensor wrappers. If an operator that is +# Composite does any non-compliant behavior, +# CompositeCompliantTensor will raise an error. +def check_with_mode(op, args, kwargs, assert_equal_fn): + CCT, cct_mode = generate_cct_and_mode() + + def wrap(e): + return CCT(e, cct_mode) if isinstance(e, torch.Tensor) else e + + expected = op(*args, **kwargs) + + args = tree_map(wrap, args) + kwargs = tree_map(wrap, kwargs) + try: + with cct_mode: + actual = op(*args, **kwargs) + # see NOTE: [What errors are Composite Compliance trying to catch?] + except RuntimeError as err: + raise_composite_compliance_error(err) + + def unwrap(e): + return e.elem if isinstance(e, CCT) else e + + assert_equal_fn(tree_map(unwrap, actual), expected) + +def gather_leaf_tensors(args, kwargs): + leaf_tensors = [] + args, args_spec = tree_flatten(args) + kwargs, kwargs_spec = tree_flatten(kwargs) + args = args + kwargs + for arg in args: + if not isinstance(arg, torch.Tensor): + continue + if arg.requires_grad: + leaf_tensors.append(arg) + return leaf_tensors + + +def compute_expected_grads(op, args, kwargs, output_process_fn_grad=None, gradcheck_wrapper=None): + if gradcheck_wrapper is None: + results = op(*args, **kwargs) + else: + results = gradcheck_wrapper(op, *args, **kwargs) + + if output_process_fn_grad is not None: + results = output_process_fn_grad(results) + + flat_results = pytree.tree_leaves(results) + flat_results = [r for r in flat_results if isinstance(r, torch.Tensor)] + flat_diff_results = [r for r in flat_results if r.requires_grad] + assert len(flat_diff_results) > 0 + + grads = [torch.ones(r.shape, device=r.device, dtype=r.dtype) for r in flat_diff_results] + leaf_tensors = gather_leaf_tensors(args, kwargs) + assert len(leaf_tensors) > 0 + return 
torch.autograd.grad(flat_diff_results, leaf_tensors, + grads, allow_unused=True, retain_graph=True) + + +# Checks if the backward formula is composite compliant by testing +# all possible permutations of {inputs, grad_outputs} being +# CompositeCompliantTensor or regular Tensors. +# +# NB: it is important that op is accepted as a Callable and not an OpInfo, +# this means we can apply check_backward_formula to things that aren't OpInfos +# while debugging. +def check_backward_formula(op: Callable, args, kwargs, + output_process_fn_grad=None, + gradcheck_wrapper=None, assert_equal_fn=None): + CCT, cct_mode = generate_cct_and_mode() + + expected = compute_expected_grads(op, args, kwargs, output_process_fn_grad, gradcheck_wrapper) + + for choice in generate_subclass_choices_args_kwargs(args, kwargs, CCT, cct_mode): + new_args, new_kwargs, which_args_are_wrapped, which_kwargs_are_wrapped = choice + leaf_tensors = gather_leaf_tensors(new_args, new_kwargs) + assert len(leaf_tensors) > 0 + + try: + if gradcheck_wrapper is None: + results = op(*new_args, **new_kwargs) + else: + results = gradcheck_wrapper(op, *new_args, **new_kwargs) + if output_process_fn_grad is not None: + results = output_process_fn_grad(results) + # see NOTE: [What errors are Composite Compliance trying to catch?] 
+ except RuntimeError as err: + raise_composite_compliance_error( + err, + f"- wrapped_args: {which_args_are_wrapped}\n" + f"- wrapped_kwargs: {which_kwargs_are_wrapped}\n" + ) + + flat_results = pytree.tree_leaves(results) + flat_results = [r for r in flat_results if isinstance(r, torch.Tensor)] + flat_diff_results = [r for r in flat_results if r.requires_grad] + assert len(flat_diff_results) > 0 + + # NB: ones, not ones_like, so we get a regular Tensor here + grads = [torch.ones(r.shape, device=r.device, dtype=r.dtype) + for r in flat_diff_results] + for flat_new_grads, which_grad_is_batched in generate_subclass_choices(grads, CCT, cct_mode): + try: + actual = torch.autograd.grad(flat_diff_results, leaf_tensors, flat_new_grads, + allow_unused=True, retain_graph=True) + # see NOTE: [What errors are Composite Compliance trying to catch?] + except RuntimeError as err: + raise_composite_compliance_error( + err, + f"- wrapped_args: {which_args_are_wrapped}\n" + f"- wrapped_kwargs: {which_kwargs_are_wrapped}\n" + f"- wrapped_grads: {which_grad_is_batched}\n" + ) + + def unwrap(e): + return e.elem if isinstance(e, CCT) else e + + assert_equal_fn(tuple(map(unwrap, actual)), expected, equal_nan=True) + +# Checks if the forward AD formula is composite compliant by testing +# all possible permutations of {primals, tangents} being +# CompositeCompliantTensor or regular Tensors. +# +# NB: it is important that op is accepted as a Callable and not an OpInfo, +# this means we can apply check_forward_ad_formula to things that aren't OpInfos +# while debugging. +def check_forward_ad_formula(op: Callable, args, kwargs, gradcheck_wrapper=None, assert_equal_fn=None): + CCT, cct_mode = generate_cct_and_mode(autograd_view_consistency=False) + + def maybe_tangent(t): + assert type(t) is not CCT + # Generate `tangent` tensor + # if given object is a Tensor and requires grad is set. 
+ if isinstance(t, torch.Tensor) and t.requires_grad: + return torch.randn_like(t) + elif is_tensorlist(t): + return [torch.randn_like(e) if e.requires_grad else None for e in t] + return None + + tangent_args = tuple(maybe_tangent(arg) for arg in args) + flat_kwargs, spec = tree_flatten(kwargs) + flat_tangent_kwargs = tuple(maybe_tangent(arg) for arg in flat_kwargs) + tangent_kwargs = tree_unflatten(flat_tangent_kwargs, spec) + + with fwAD.dual_level(): + def maybe_make_dual(dual): + # Returns dual tensor if primal is a tensor/tensor subclass + # with requires_grad set. + primal, tangent = dual + if isinstance(primal, torch.Tensor) and primal.requires_grad: + return fwAD.make_dual(primal.detach(), tangent) + elif is_tensorlist(primal): + return tuple(fwAD.make_dual(pri.detach(), tang) if tang is not None else pri + for pri, tang in zip(primal, tangent)) + return primal + + def compute_expected_grad(args, tangent_args, kwargs, tangent_kwargs): + op_args = tuple(map(maybe_make_dual, zip(args, tangent_args))) + op_kwargs = {k: maybe_make_dual((v, tangent_kwargs[k])) for k, v in kwargs.items()} + + if gradcheck_wrapper is None: + return op(*op_args, **op_kwargs) + return gradcheck_wrapper(op, *op_args, **op_kwargs) + + expected = compute_expected_grad(args, tangent_args, kwargs, tangent_kwargs) + expected = tree_map(fwAD.unpack_dual, expected) + expected_primals = tree_map(lambda x: x.primal, expected) + expected_tangents = tree_map(lambda x: x.tangent, expected) + + # Permutations of arg and kwargs in CCT. + for choice in generate_subclass_choices_args_kwargs(args, kwargs, CCT, cct_mode): + new_args, new_kwargs, which_args_are_wrapped, which_kwargs_are_wrapped = choice + + # Permutations tangent arg and tangent kwargs in CCT. 
+ for tang_choice in generate_subclass_choices_args_kwargs(tangent_args, tangent_kwargs, CCT, cct_mode): + new_tang_args, new_tang_kwargs, \ + which_tang_args_are_wrapped, which_tang_kwargs_are_wrapped = tang_choice + + op_args = tuple(map(maybe_make_dual, zip(new_args, new_tang_args))) + op_kwargs = {k: maybe_make_dual((v, new_tang_kwargs[k])) for k, v in new_kwargs.items()} + + try: + if gradcheck_wrapper is None: + actual = op(*op_args, **op_kwargs) + else: + actual = gradcheck_wrapper(op, *op_args, **op_kwargs) + # see NOTE: [What errors are Composite Compliance trying to catch?] + except RuntimeError as err: + raise_composite_compliance_error( + err, + f"- wrapped_args: {which_args_are_wrapped}\n" + f"- wrapped_kwargs: {which_kwargs_are_wrapped}\n" + f"- wrapped_tangent_args: {which_tang_args_are_wrapped}\n" + f"- wrapped_tangent_kwargs: {which_tang_kwargs_are_wrapped}\n" + ) + + def unwrap(e): + return e.elem if isinstance(e, CCT) else e + + actual = tree_map(fwAD.unpack_dual, actual) + actual_primals = tree_map(lambda x: unwrap(x.primal), actual) + actual_tangents = tree_map(lambda x: unwrap(x.tangent), actual) + assert_equal_fn(actual_primals, expected_primals, equal_nan=True) + assert_equal_fn(actual_tangents, expected_tangents, equal_nan=True) diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/custom_op_db.py b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/custom_op_db.py new file mode 100644 index 0000000000000000000000000000000000000000..f15e8312aa5a41c4be0e5422ce198d4673302f14 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/custom_op_db.py @@ -0,0 +1,586 @@ +# mypy: allow-untyped-decorators +# mypy: allow-untyped-defs +import torch +import functools +from torch.testing import make_tensor +from torch.testing._internal.opinfo.core import ( + OpInfo, + SampleInput, +) +from torch.testing._internal.common_dtype import all_types_and +import numpy as np +from 
torch.testing._internal.autograd_function_db import ( + sample_inputs_numpy_cube, + sample_inputs_numpy_mul, + sample_inputs_numpy_mul_scalar, + sample_inputs_numpy_sort, + sample_inputs_numpy_take, +) +from torch import Tensor +from torch.types import Number +from typing import * # noqa: F403 + +# Note: [custom op db] +# +# This is a collection of custom operator test cases written as OpInfos +# so they can easily be consumed by OpInfo-based tests to check if subsystems +# support them correctly. + +def to_numpy(tensor): + return tensor.cpu().numpy() + +@torch.library.custom_op("_torch_testing::numpy_cube", mutates_args=()) +def numpy_cube(x: Tensor) -> Tuple[Tensor, Tensor]: + x_np = to_numpy(x) + dx = torch.tensor(3 * x_np ** 2, device=x.device) + return torch.tensor(x_np ** 3, device=x.device), dx + +@numpy_cube.register_fake +def _(x): + return x.clone(), x.clone() + +def numpy_cube_setup_context(ctx, inputs, output): + x, = inputs + cube, dx = output + ctx.save_for_backward(x, dx) + +def numpy_cube_backward(ctx, grad_out, grad_dx): + x, dx = ctx.saved_tensors + grad_x = numpy_mul(grad_out, dx) + 6 * numpy_mul(grad_dx, x) + return grad_x + +numpy_cube.register_autograd(numpy_cube_backward, setup_context=numpy_cube_setup_context) + +def numpy_cube_vmap(info, in_dims, x): + result = numpy_cube(x) + return result, (in_dims[0], in_dims[0]) + +numpy_cube.register_vmap(numpy_cube_vmap) + +@torch.library.custom_op("_torch_testing::numpy_mul", mutates_args=()) +def numpy_mul(x: Tensor, y: Tensor) -> Tensor: + return torch.tensor(to_numpy(x) * to_numpy(y), device=x.device) + +@numpy_mul.register_fake +def _(x, y): + assert x.device == y.device + return (x * y).contiguous() + +def numpy_mul_setup_context(ctx, inputs, output): + ctx.save_for_backward(*inputs) + +def numpy_mul_backward(ctx, grad_out): + x, y = ctx.saved_tensors + grad_x = grad_out * y if ctx.needs_input_grad[0] else None + grad_y = grad_out * x if ctx.needs_input_grad[1] else None + return grad_x, grad_y 
+ +numpy_mul.register_autograd(numpy_mul_backward, setup_context=numpy_mul_setup_context) + +def numpy_mul_vmap(info, in_dims, x, y): + x_bdim, y_bdim = in_dims + x = x.movedim(x_bdim, -1) if x_bdim is not None else x.unsqueeze(-1) + y = y.movedim(y_bdim, -1) if y_bdim is not None else y.unsqueeze(-1) + result = x * y + result = result.movedim(-1, 0) + return result, 0 + +numpy_mul.register_vmap(numpy_mul_vmap) + +@torch.library.custom_op("_torch_testing::numpy_mul_scalar", mutates_args=()) +def numpy_mul_scalar(x: Tensor, *, scalar: float) -> Tensor: + return torch.tensor(to_numpy(x) * scalar, device=x.device) + +@numpy_mul_scalar.register_fake +def _(x, *, scalar): + return (x * scalar).contiguous() + +def numpy_mul_scalar_setup_context(ctx, inputs, keyword_only_inputs, output): + ctx.scalar = keyword_only_inputs["scalar"] + +def numpy_mul_scalar_backward(ctx, grad_out): + grad_x = grad_out * ctx.scalar + return grad_x + +numpy_mul_scalar.register_autograd(numpy_mul_scalar_backward, setup_context=numpy_mul_scalar_setup_context) + +def numpy_mul_scalar_vmap(info, in_dims, x, *, scalar): + x_bdim, = in_dims + x = x.movedim(x_bdim, -1) if x_bdim is not None else x.unsqueeze(-1) + result = x * scalar + result = result.movedim(-1, 0) + return result, 0 + +numpy_mul_scalar.register_vmap(numpy_mul_scalar_vmap) + +@torch.library.custom_op("_torch_testing::numpy_sort", mutates_args=()) +def numpy_sort(x: Tensor, dim: int) -> Tuple[Tensor, Tensor, Tensor]: + device = x.device + x = to_numpy(x) + ind = np.argsort(x, axis=dim) + ind_inv = np.argsort(ind, axis=dim) + result = np.take_along_axis(x, ind, axis=dim) + return ( + torch.tensor(result, device=device), + torch.tensor(ind, device=device), + torch.tensor(ind_inv, device=device), + ) + +@numpy_sort.register_fake +def _(x, dim): + return torch.empty_like(x), torch.empty_like(x, dtype=torch.long), torch.empty_like(x, dtype=torch.long) + +def numpy_sort_setup_context(ctx, inputs, output): + out, ind, ind_inv = output + 
ctx.dim = inputs[1] + ctx.save_for_backward(ind, ind_inv) + ctx.mark_non_differentiable(ind, ind_inv) + +def numpy_sort_backward(ctx, grad_out, grad_ind, grad_ind_inv): + ind, ind_inv = ctx.saved_tensors + return numpy_take(grad_out, ind_inv, ind, ctx.dim), None + +numpy_sort.register_autograd(numpy_sort_backward, setup_context=numpy_sort_setup_context) + +def numpy_sort_vmap(info, in_dims, x, dim): + x_bdim, _ = in_dims + x = x.movedim(x_bdim, 0) + dim = dim if dim >= 0 else dim + x.dim() - 1 + result = numpy_sort(x, dim + 1) + return result, (0, 0, 0) + +numpy_sort.register_vmap(numpy_sort_vmap) + +@torch.library.custom_op("_torch_testing::numpy_take", mutates_args=()) +def numpy_take(x: Tensor, ind: Tensor, ind_inv: Tensor, dim: int) -> Tensor: + device = x.device + x = to_numpy(x) + ind = to_numpy(ind) + return torch.tensor(np.take_along_axis(x, ind, dim), device=device) + +@numpy_take.register_fake +def _(x, ind, ind_inv, dim): + assert x.device == ind.device + assert x.device == ind_inv.device + assert ind.dtype == torch.long + assert ind_inv.dtype == torch.long + return torch.empty_like(x) + +def numpy_take_setup_context(ctx, inputs, output): + x, ind, ind_inv, dim = inputs + ctx.dim = dim + ctx.save_for_backward(ind, ind_inv) + +def numpy_take_backward(ctx, grad_out): + ind, ind_inv = ctx.saved_tensors + grad_x = numpy_take(grad_out, ind_inv, ind, ctx.dim) + return grad_x, None, None, None + +numpy_take.register_autograd(numpy_take_backward, setup_context=numpy_take_setup_context) + +def numpy_take_vmap(info, in_dims, x, ind, ind_inv, dim): + x_bdim, ind_bdim, ind_inv_bdim, _ = in_dims + + # wrap dim + logical_dim = x.dim() if x_bdim is None else x_bdim - 1 + dim = dim if dim >= 0 else dim + logical_dim + + def expand_bdim(x, x_bdim): + if x_bdim is None: + return x.expand(info.batch_size, *x.shape) + return x.movedim(x_bdim, 0) + + x = expand_bdim(x, x_bdim) + ind = expand_bdim(ind, ind_bdim) + ind_inv = expand_bdim(ind_inv, ind_inv_bdim) + + return 
numpy_take(x, ind, ind_inv, dim + 1), 0 + +numpy_take.register_vmap(numpy_take_vmap) + +@torch.library.custom_op("_torch_testing::numpy_nonzero", mutates_args=()) +def numpy_nonzero(x: Tensor) -> Tensor: + x_np = to_numpy(x) + res = np.stack(np.nonzero(x_np), axis=1) + if res.shape[0] <= 1: + raise RuntimeError("not supported") + return torch.tensor(res, device=x.device) + +@numpy_nonzero.register_fake +def _(x): + ctx = torch._custom_op.impl.get_ctx() + i0 = ctx.create_unbacked_symint() + shape = [i0, x.dim()] + result = x.new_empty(shape, dtype=torch.long) + return result + +def sample_inputs_numpy_nonzero(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = functools.partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + shape = 10 + result = make_arg(shape, low=0.9, high=2) + mask = make_tensor(shape, low=0, high=2, device=device, dtype=torch.long) + with torch.no_grad(): + result *= mask + + yield SampleInput(result, args=()) + +def numpy_nonzero_vmap(info, in_dims, x): + raise NotImplementedError("Operator is data-dependent and cannot be vmapped.") + +numpy_nonzero.register_vmap(numpy_nonzero_vmap) + +@torch.library.custom_op("_torch_testing::numpy_view_copy", mutates_args=()) +def numpy_view_copy(x: Tensor, shape: Sequence[int]) -> Tensor: + return torch.tensor(np.copy(to_numpy(x).reshape(shape)), device=x.device) + +@numpy_view_copy.register_fake +def _(x, shape) -> Tensor: + return x.clone().view(shape).clone() + +def numpy_view_copy_setup_context(ctx, inputs, output) -> None: + ctx.x_shape = inputs[0].shape + +def numpy_view_copy_backward(ctx, grad_out): + return torch.ops._torch_testing.numpy_view_copy(grad_out, ctx.x_shape), None + +numpy_view_copy.register_autograd(numpy_view_copy_backward, setup_context=numpy_view_copy_setup_context) + +def numpy_view_copy_vmap(info, in_dims, x, shape): + x_bdim, _ = in_dims + x = x.movedim(x_bdim, 0) + x_shape = x.shape[0] + batch_shape = (x_shape, *shape) + result = 
numpy_view_copy(x, batch_shape) + return result, 0 + +numpy_view_copy.register_vmap(numpy_view_copy_vmap) + +def sample_inputs_numpy_view_copy(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = functools.partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + result = make_arg(2, 3, 4, low=0.9, high=2) + yield SampleInput(result, args=([2, 12],)) + +@torch.library.custom_op('_torch_testing::numpy_cat', mutates_args=()) +def numpy_cat(xs: Sequence[Tensor], dim: int) -> Tensor: + assert len(xs) > 0 + assert all(x.device == xs[0].device for x in xs) + assert all(x.dtype == xs[0].dtype for x in xs) + np_xs = [to_numpy(x) for x in xs] + np_out = np.concatenate(np_xs, axis=dim) + return torch.tensor(np_out, device=xs[0].device) + +@numpy_cat.register_fake +def _(xs, dim): + assert len(xs) > 0 + assert all(x.device == xs[0].device for x in xs) + assert all(x.dtype == xs[0].dtype for x in xs) + return torch.cat(xs, dim=dim) + +def numpy_cat_setup_context(ctx, inputs, output): + xs, dim = inputs + ctx.dim_sizes = [x.shape[dim] for x in xs] + ctx.dim = dim + +def numpy_cat_backward(ctx, grad_out): + dim_sizes = ctx.dim_sizes + dim = ctx.dim + + splits = list(np.cumsum(dim_sizes)[:-1]) + grad_xs = torch.ops._torch_testing.numpy_split_copy(grad_out, splits, dim) + return grad_xs, None + +numpy_cat.register_autograd(numpy_cat_backward, setup_context=numpy_cat_setup_context) + +def numpy_cat_vmap(info, in_dims, x, dim): + x_bdim, = in_dims + result = numpy_cat(x, dim) + return result, x_bdim + +numpy_cat.register_vmap(numpy_cat_vmap) + +def sample_inputs_numpy_cat(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = functools.partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + r0 = make_arg(2, 3, 4, low=0.9, high=2) + r1 = make_arg(4, 3, 4, low=0.9, high=2) + r2 = make_arg(5, 3, 4, low=0.9, high=2) + yield SampleInput([r0, r1, r2], args=(0,)) + +@torch.library.custom_op('_torch_testing::numpy_split_copy', 
mutates_args=()) +def numpy_split_copy(x: Tensor, splits: Sequence[int], dim: int) -> List[Tensor]: + x_np = to_numpy(x) + arrs = np.split(x_np, splits, axis=dim) + return [torch.tensor(arr, device=x.device, dtype=x.dtype) for arr in arrs] + +@numpy_split_copy.register_fake +def _(x, splits, dim): + return [xi.clone() for xi in torch.tensor_split(x, splits, dim)] + +def numpy_split_copy_setup_context(ctx, inputs, output): + _, _, dim = inputs + ctx.dim = dim + +def numpy_split_copy_backward(ctx, grad_out): + result = torch.ops._torch_testing.numpy_cat(grad_out, dim=ctx.dim) + return result, None, None + +numpy_split_copy.register_autograd(numpy_split_copy_backward, setup_context=numpy_split_copy_setup_context) + +def numpy_split_copy_vmap(info, in_dims, x, splits, dim): + x_bdim, _ , _ = in_dims + x = x.movedim(x_bdim, 0) + result = numpy_split_copy(x, splits, dim + 1) + return result, 0 + +numpy_split_copy.register_vmap(numpy_split_copy_vmap) + +def sample_inputs_numpy_split_copy(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = functools.partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + x = make_arg(2, 9, low=0.9, high=2) + yield SampleInput(x, args=([1, 3, 6], 1)) + +@torch.library.custom_op('_torch_testing::numpy_split_copy_with_int', mutates_args=()) +def numpy_split_copy_with_int(x: Tensor, splits: Sequence[int], dim: int) -> Tuple[List[Tensor], int]: + x_np = to_numpy(x) + arrs = np.split(x_np, splits, axis=dim) + return [torch.tensor(arr, device=x.device, dtype=x.dtype) for arr in arrs], len(splits) + +@numpy_split_copy_with_int.register_fake +def _(x, splits, dim): + return [xi.clone() for xi in torch.tensor_split(x, splits, dim)], len(splits) + +def numpy_split_copy_with_int_setup_context(ctx, inputs, output): + _, _, dim = inputs + ctx.dim = dim + +def numpy_split_copy_with_int_backward(ctx, grad_out, _): + return torch.ops._torch_testing.numpy_cat(grad_out, dim=ctx.dim), None, None + 
+numpy_split_copy_with_int.register_autograd( + numpy_split_copy_with_int_backward, + setup_context=numpy_split_copy_with_int_setup_context) + +def numpy_split_copy_with_int_vmap(info, in_dims, x, splits, dim): + x_bdim, _ , _ = in_dims + x = x.movedim(x_bdim, 0) + result, len_split = numpy_split_copy_with_int(x, splits, dim + 1) + return (result, len_split), ([0 for _ in range(len(result))], None) + +numpy_split_copy_with_int.register_vmap(numpy_split_copy_with_int_vmap) + +@torch.library.custom_op("_torch_testing::numpy_nms", mutates_args=()) +def numpy_nms(boxes: Tensor, scores: Tensor, iou_threshold: Number) -> Tensor: + # Adapted from Ross Girshick's fast-rcnn implementation at + # https://github.com/rbgirshick/fast-rcnn/blob/master/lib/utils/nms.py + assert boxes.device == scores.device + device = boxes.device + + boxes = to_numpy(boxes) + scores = to_numpy(scores) + + N = boxes.shape[0] + assert boxes.shape == (N, 4) + assert scores.shape == (N,) + + x1 = boxes[:, 0] + y1 = boxes[:, 1] + x2 = boxes[:, 2] + y2 = boxes[:, 3] + + areas = (x2 - x1 + 1) * (y2 - y1 + 1) + order = scores.argsort()[::-1] + + keep = [] + while order.size > 0: + i = order[0] + keep.append(i) + xx1 = np.maximum(x1[i], x1[order[1:]]) + yy1 = np.maximum(y1[i], y1[order[1:]]) + xx2 = np.minimum(x2[i], x2[order[1:]]) + yy2 = np.minimum(y2[i], y2[order[1:]]) + + w = np.maximum(0.0, xx2 - xx1 + 1) + h = np.maximum(0.0, yy2 - yy1 + 1) + inter = w * h + ovr = inter / (areas[i] + areas[order[1:]] - inter) + + inds = np.where(ovr <= iou_threshold)[0] + order = order[inds + 1] + + result = torch.tensor(np.stack(keep), device=device) + # Needed for data-dependent condition :( + assert result.size(0) >= 2 + return result + +@numpy_nms.register_fake +def _(boxes, scores, iou_threshold): + assert boxes.device == scores.device + N = boxes.shape[0] + assert boxes.shape == (N, 4) + assert scores.shape == (N,) + + ctx = torch._custom_op.impl.get_ctx() + i0 = ctx.create_unbacked_symint() + result = 
boxes.new_empty([i0], dtype=torch.int64) + return result + +def numpy_nms_vmap(info, in_dims, boxes, scores, iou_threshold): + raise NotImplementedError("Operator is data-dependent and cannot be vmapped.") + +numpy_nms.register_vmap(numpy_nms_vmap) + +def sample_inputs_numpy_nms(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = functools.partial(make_tensor, device=device, dtype=dtype) + N = 64 + xs = make_arg([N], low=0, high=28) + dx = make_arg([N], low=0, high=4) + ys = make_arg([N], low=0, high=28) + dy = make_arg([N], low=0, high=4) + boxes = torch.stack([xs, ys, xs + dx, ys + dy], dim=1).requires_grad_(requires_grad) + scores = make_arg([N], low=0, high=1, requires_grad=requires_grad) + iou_threshold = make_arg([], low=0, high=1).item() + + yield SampleInput(boxes, args=(scores, iou_threshold)) + +custom_op_db = [ + OpInfo( + 'NumpyCubeCustomOp', + op=numpy_cube._opoverload, + sample_inputs_func=sample_inputs_numpy_cube, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + ), + OpInfo( + 'NumpyMulCustomOp', + op=numpy_mul._opoverload, + sample_inputs_func=sample_inputs_numpy_mul, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + ), + OpInfo( + 'NumpyMulScalarCustomOp', + op=numpy_mul_scalar._opoverload, + sample_inputs_func=sample_inputs_numpy_mul_scalar, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + ), + OpInfo( + 'NumpySortCustomOp', + op=numpy_sort._opoverload, + sample_inputs_func=sample_inputs_numpy_sort, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + ), + OpInfo( + 'NumpyTakeCustomOp', + op=numpy_take._opoverload, + sample_inputs_func=sample_inputs_numpy_take, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + ), + OpInfo( + 'NumpyNonzeroCustomOp', + op=numpy_nonzero._opoverload, + sample_inputs_func=sample_inputs_numpy_nonzero, + dtypes=all_types_and(torch.bool, torch.half), + supports_autograd=False, + supports_out=False, + ), + 
OpInfo( + 'NumpyNMSCustomOp', + op=torch.ops._torch_testing.numpy_nms, + sample_inputs_func=sample_inputs_numpy_nms, + dtypes=all_types_and(torch.bool, torch.half), + supports_autograd=False, + supports_out=False, + ), + OpInfo( + 'NumpyViewCopyCustomOp', + op=torch.ops._torch_testing.numpy_view_copy, + sample_inputs_func=sample_inputs_numpy_view_copy, + dtypes=all_types_and(torch.bool, torch.half), + supports_autograd=True, + supports_out=False, + ), + OpInfo( + 'NumpyCatCustomOp', + op=torch.ops._torch_testing.numpy_cat, + sample_inputs_func=sample_inputs_numpy_cat, + dtypes=all_types_and(torch.bool, torch.half), + supports_autograd=True, + check_batched_grad=False, + check_batched_gradgrad=False, + supports_out=False, + ), + OpInfo( + 'NumpySplitCopyCustomOp', + op=torch.ops._torch_testing.numpy_split_copy, + sample_inputs_func=sample_inputs_numpy_split_copy, + dtypes=all_types_and(torch.bool, torch.half), + supports_autograd=True, + check_batched_grad=False, + check_batched_gradgrad=False, + supports_out=False, + ), + OpInfo( + 'NumpySplitCopyWithIntCustomOp', + op=torch.ops._torch_testing.numpy_split_copy_with_int, + sample_inputs_func=sample_inputs_numpy_split_copy, + dtypes=all_types_and(torch.bool, torch.half), + gradcheck_wrapper=lambda op, *args, **kwargs: op(*args, **kwargs)[0], + supports_autograd=True, + check_batched_grad=False, + check_batched_gradgrad=False, + supports_out=False, + ), +] + + +# ============================================================== +# some mechanical test cases +# ============================================================== + +lib = torch.library.Library("_torch_testing", "FRAGMENT") # noqa: TOR901 + +lib.define("source0(Tensor x) -> Tensor") + +@torch.library.register_fake("_torch_testing::source0", lib=lib) +def _(x): + return x.clone() + +lib.define("source1(Tensor x) -> Tensor") + +def source1_fake(x): + return x.clone() + +torch.library.register_fake("_torch_testing::source1", source1_fake, lib=lib) + 
+lib.define("source2(Tensor x) -> Tensor") + +@torch.library.register_fake("_torch_testing::source2", lib=lib) +def _(x): + return x.clone() + +lib.define("source3(Tensor x) -> Tensor") + +def source3_fake(x): + return x.clone() + +torch.library.register_fake("_torch_testing::source3", source3_fake, lib=lib) + + +@torch.library.custom_op("_torch_testing::source4", mutates_args=()) +def source4(x: Tensor) -> Tensor: + return x.clone() + +@source4.register_fake +def _(x): + return x.clone() + +@torch.library.custom_op("_torch_testing::source5", mutates_args=()) +def source5(x: Tensor) -> Tensor: + return x.clone() + +def source5_fake(x): + return x.clone() + +source5.register_fake(source5_fake) diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/custom_tensor.py b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/custom_tensor.py new file mode 100644 index 0000000000000000000000000000000000000000..3b8ff377ea43dd6f4e4f93fe436a616888f85267 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/custom_tensor.py @@ -0,0 +1,67 @@ +# mypy: ignore-errors + +import torch +import torch.utils._pytree as pytree +from torch.utils._python_dispatch import return_and_correct_aliasing + + +# A simple tensor subclass that holds a tensor with custom metadata and custom method +class ConstantExtraMetadataTensor(torch.Tensor): + @staticmethod + def __new__(cls, elem): + shape = elem.shape + kwargs = {} + kwargs["strides"] = elem.stride() + kwargs["storage_offset"] = elem.storage_offset() + kwargs["device"] = elem.device + kwargs["layout"] = elem.layout + kwargs["requires_grad"] = elem.requires_grad + kwargs["dtype"] = elem.dtype + return torch.Tensor._make_wrapper_subclass(cls, shape, **kwargs) + + def __init__(self, elem): + self.elem = elem + self.constant_attribute = 4 + + def __repr__(self): + inner_repr = repr(self.elem) + return f"CustomTensor({inner_repr})" + + def __tensor_flatten__(self): + return 
["elem"], self.constant_attribute + + def add_constant(self, a): + self.constant_attribute += a + + @staticmethod + def __tensor_unflatten__(inner_tensors, meta, outer_size, outer_stride): + assert meta is not None + elem = inner_tensors["elem"] + out = ConstantExtraMetadataTensor(elem) + out.constant_attribute = meta + return out + + @classmethod + def __torch_dispatch__(cls, func, types, args, kwargs): + if kwargs is None: + kwargs = {} + args_inner = pytree.tree_map_only( + ConstantExtraMetadataTensor, lambda x: x.elem, args + ) + + kwargs_inner = pytree.tree_map_only( + ConstantExtraMetadataTensor, lambda x: x.elem, kwargs + ) + + out_inner = func(*args_inner, **kwargs_inner) + out_inner_flat, spec = pytree.tree_flatten(out_inner) + # for aten ops that return non-tensors, just assume that + # our cust inner tensors return the same value + out_flat = [ + ConstantExtraMetadataTensor(o_inner) + if isinstance(o_inner, torch.Tensor) + else o_inner + for o_inner in out_inner_flat + ] + out = pytree.tree_unflatten(out_flat, spec) + return return_and_correct_aliasing(func, args, kwargs, out) diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/dist_utils.py b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/dist_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..21a1f2011e6f322154ca7c591907ed1518c7cd20 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/dist_utils.py @@ -0,0 +1,200 @@ +# mypy: ignore-errors + +import re +import sys +import time +from functools import partial, wraps +from typing import Tuple + +import torch.distributed as dist +import torch.distributed.rpc as rpc +from torch.distributed.rpc import _rref_context_get_debug_info +from torch.testing._internal.common_utils import FILE_SCHEMA, TEST_WITH_TSAN + + +if not dist.is_available(): + print("c10d not available, skipping tests", file=sys.stderr) + sys.exit(0) + + +INIT_METHOD_TEMPLATE = FILE_SCHEMA + 
"{file_name}" + +def dist_init( + old_test_method=None, + setup_rpc: bool = True, + clean_shutdown: bool = True, + faulty_messages=None, + messages_to_delay=None, +): + """ + We use this decorator for setting up and tearing down state since + MultiProcessTestCase runs each `test*` method in a separate process and + each process just runs the `test*` method without actually calling + 'setUp' and 'tearDown' methods of unittest. + + Note: pass the string representation of MessageTypes that should be used + with the faulty agent's send function. By default, all retriable messages + ("RREF_FORK_REQUEST", "RREF_CHILD_ACCEPT", "RREF_USER_DELETE", + "CLEANUP_AUTOGRAD_CONTEXT_REQ") will use the faulty send (this default is + set from faulty_rpc_agent_test_fixture.py). + """ + # If we use dist_init without arguments (ex: @dist_init), old_test_method is + # appropriately set and we return the wrapper appropriately. On the other + # hand if dist_init has arguments (ex: @dist_init(clean_shutdown=False)), + # old_test_method is None and we return a functools.partial which is the real + # decorator that is used and as a result we recursively call dist_init with + # old_test_method and the rest of the arguments appropriately set. + if old_test_method is None: + return partial( + dist_init, + setup_rpc=setup_rpc, + clean_shutdown=clean_shutdown, + faulty_messages=faulty_messages, + messages_to_delay=messages_to_delay, + ) + + @wraps(old_test_method) + def new_test_method(self, *arg, **kwargs): + # Setting _ignore_rref_leak to make sure OwnerRRefs are properly deleted + # in tests. + import torch.distributed.rpc.api as api + + api._ignore_rref_leak = False + self.worker_id = self.rank + self.setup_fault_injection(faulty_messages, messages_to_delay) + + rpc_backend_options = self.rpc_backend_options + if setup_rpc: + if TEST_WITH_TSAN: + # TSAN runs much slower. 
+ rpc_backend_options.rpc_timeout = rpc.constants.DEFAULT_RPC_TIMEOUT_SEC * 5 + rpc.constants.DEFAULT_SHUTDOWN_TIMEOUT = 60 + + rpc.init_rpc( + name="worker%d" % self.rank, + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=rpc_backend_options, + ) + + return_value = old_test_method(self, *arg, **kwargs) + + if setup_rpc: + rpc.shutdown(graceful=clean_shutdown) + + return return_value + + return new_test_method + + +def noop() -> None: + pass + + +def wait_until_node_failure(rank: int, expected_error_regex: str = ".*") -> str: + """ + Loops until an RPC to the given rank fails. This is used to + indicate that the node has failed in unit tests. + Args: + rank (int): Rank of the node expected to fail + expected_error_regex (optional, str): Regex of exception message expected. Useful to ensure a specific failure + occurs, not just any. + """ + while True: + try: + rpc.rpc_sync(f"worker{rank}", noop, args=()) + time.sleep(0.1) + except Exception as e: + if re.search(pattern=expected_error_regex, string=str(e)): + return str(e) + + +def wait_until_pending_futures_and_users_flushed(timeout: int = 20) -> None: + """ + The RRef protocol holds forkIds of rrefs in a map until those forks are + confirmed by the owner. The message confirming the fork may arrive after + our tests check whether this map is empty, which leads to failures and + flaky tests. to_here also does not guarantee that we have finished + processind the owner's confirmation message for the RRef. This function + loops until the map is empty, which means the messages have been received + as processed. Call this function before asserting the map returned by + _get_debug_info is empty. 
+ """ + start = time.time() + while True: + debug_info = _rref_context_get_debug_info() + num_pending_futures = int(debug_info["num_pending_futures"]) + num_pending_users = int(debug_info["num_pending_users"]) + if num_pending_futures == 0 and num_pending_users == 0: + break + time.sleep(0.1) + if time.time() - start > timeout: + raise ValueError( + f"Timed out waiting to flush pending futures and users, " + f"had {num_pending_futures} pending futures and {num_pending_users} pending users" + ) + + +def get_num_owners_and_forks() -> Tuple[str, str]: + """ + Retrieves number of OwnerRRefs and forks on this node from + _rref_context_get_debug_info. + """ + rref_dbg_info = _rref_context_get_debug_info() + num_owners = rref_dbg_info["num_owner_rrefs"] + num_forks = rref_dbg_info["num_forks"] + return num_owners, num_forks + + +def wait_until_owners_and_forks_on_rank( + num_owners: int, num_forks: int, rank: int, timeout: int = 20 +) -> None: + """ + Waits until timeout for num_forks and num_owners to exist on the rank. Used + to ensure proper deletion of RRefs in tests. + """ + start = time.time() + while True: + num_owners_on_rank, num_forks_on_rank = rpc.rpc_sync( + worker_name(rank), get_num_owners_and_forks, args=(), timeout=5 + ) + num_owners_on_rank = int(num_owners_on_rank) + num_forks_on_rank = int(num_forks_on_rank) + if num_owners_on_rank == num_owners and num_forks_on_rank == num_forks: + return + time.sleep(1) + if time.time() - start > timeout: + raise ValueError( + f"Timed out waiting {timeout} sec for {num_owners} owners and {num_forks} forks on rank," + f" had {num_owners_on_rank} owners and {num_forks_on_rank} forks" + ) + + +def initialize_pg(init_method, rank: int, world_size: int) -> None: + # This is for tests using `dist.barrier`. 
def get_function_event(function_events, partial_event_name):
    """
    Return the first profiler event whose name contains ``partial_event_name``.

    Args:
        function_events: list of function events, e.g. the ``function_events``
            attribute of a completed ``torch.autograd.profiler.profile``.
        partial_event_name (str): substring expected in the event's name.

    Raises:
        IndexError: if no event name contains ``partial_event_name``.
    """
    # Keep list-then-index (rather than next()) so a miss raises IndexError,
    # matching the original behavior callers may rely on.
    matches = [event for event in function_events if partial_event_name in event.name]
    return matches[0]
= 4 + +class ShardedTensorTestBase(MultiProcessTestCase): + @property + def world_size(self): + return TEST_GPU_NUM + + def init_pg(self, backend="nccl"): + if backend not in ["nccl", "gloo", "mpi"]: + raise RuntimeError(f"Backend {backend} not supported!") + + dist.init_process_group( + backend=backend, + world_size=self.world_size, + rank=self.rank, + init_method=f"file://{self.file_name}", + ) + + # set device for nccl pg for collectives + if backend == "nccl": + torch.cuda.set_device(self.rank) + + + def init_rpc(self): + rpc_backend_options = rpc.TensorPipeRpcBackendOptions(_transports=tp_transports()) + rpc_backend_options.init_method = f"file://{self.file_name}" + for rank in range(self.world_size): + rpc_backend_options.set_device_map( + f"worker{rank}", {rank: self.rank, self.rank: rank} + ) + + rpc.init_rpc( + name="worker%d" % self.rank, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=rpc_backend_options, + ) + + def init_comms(self, init_rpc=True, backend="nccl"): + if init_rpc: + self.init_rpc() + self.init_pg(backend=backend) + + def destroy_comms(self, destroy_rpc=True): + # Wait for all ranks to reach here before starting shutdown. 
def with_comms(func=None, init_rpc=True, backend="nccl"):
    """Decorator that brings up (and tears down) comms around a test.

    Initializes the process group (plus RPC unless ``init_rpc=False``)
    before the wrapped test runs and destroys both afterwards. Skips the
    test when the nccl backend is requested but too few GPUs exist.
    """
    # Support both the bare @with_comms and the @with_comms(...) forms.
    if func is None:
        return partial(with_comms, init_rpc=init_rpc, backend=backend)

    @wraps(func)
    def wrapper(self, *args, **kwargs):
        needs_gpus = backend == "nccl"
        if needs_gpus and torch.cuda.device_count() < self.world_size:
            sys.exit(TEST_SKIPS[f"multi-gpu-{self.world_size}"].exit_code)
        self.init_comms(init_rpc=init_rpc, backend=backend)
        func(self, *args, **kwargs)
        self.destroy_comms(destroy_rpc=init_rpc)

    return wrapper
a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/__pycache__/_test_ops_common.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/__pycache__/_test_ops_common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a14e292a7d1e1770f5c3ea28d901a9edbed00b11 Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/__pycache__/_test_ops_common.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/_test_ops_common.py b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/_test_ops_common.py new file mode 100644 index 0000000000000000000000000000000000000000..398b2fd8a36aa2b43e67ae0d161ab0df1c1d51d4 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/_test_ops_common.py @@ -0,0 +1,136 @@ +# mypy: allow-untyped-defs + +import builtins + +import torch +from torch.distributed._shard.sharding_spec import ( + ChunkShardingSpec, + EnumerableShardingSpec, + ShardMetadata, +) +from torch.distributed._shard.sharding_spec._internals import ( + get_chunked_dim_size, + get_split_size, +) + + +def generate_chunk_sharding_specs_for_test(sharding_dim): + return [ + ChunkShardingSpec( + dim=sharding_dim, + placements=[ + "rank:0/cuda:0", + "rank:1/cuda:1", + "rank:2/cuda:2", + "rank:3/cuda:3", + ], + ), + # Test different ordering. (Case 1) + ChunkShardingSpec( + dim=sharding_dim, + placements=[ + "rank:2/cuda:2", + "rank:3/cuda:3", + "rank:0/cuda:0", + "rank:1/cuda:1", + ], + ), + # Test different ordering. 
def generate_local_weight_sharding_params_for_test(
    local_weight, sharded_dim, gpu_num, spec, rank
):
    """
    Compute where ``rank``'s shard of ``local_weight`` begins along
    ``sharded_dim`` and how long it is, per ``spec``, so tests can slice the
    local weight and compare against the sharded-tensor version.

    Args:
        local_weight: weight matrix to be sharded.
        sharded_dim: dimension along which sharding happens.
        gpu_num: number of ranks.
        spec: sharding spec whose ``placements`` order the shards.
        rank: rank whose shard parameters are requested.

    Returns:
        (start_pos, chunk_size) of this rank's shard along ``sharded_dim``.
    """
    dim_len = local_weight.size(sharded_dim)
    split_size = get_split_size(dim_len, gpu_num)
    offset = 0
    start_pos = offset
    # Walk placements in spec order, accumulating offsets until this rank.
    for idx, placement in enumerate(spec.placements):
        chunk_size = get_chunked_dim_size(dim_len, split_size, idx)
        if placement.rank() == rank:
            start_pos = offset
            break
        offset += chunk_size
    return start_pos, chunk_size
+ """ + tensor = getattr(module, param_name) + return torch.nn.Parameter(tensor.detach().clone()) + +def gen_binary_op_func(python_op, inplace=False): + src_lines = ['def f(lhs, rhs):'] + if "torch" in python_op: + src_lines.append(f' return {python_op}(lhs, rhs)\n') + elif inplace: + src_lines.append(f' lhs {python_op}= rhs\n return lhs\n') + else: + src_lines.append(f' return lhs {python_op} rhs\n') + + code_str = '\n'.join(src_lines) + g = {'torch': torch} + builtins.exec(code_str, g) + return g["f"] diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/_test_st_common.py b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/_test_st_common.py new file mode 100644 index 0000000000000000000000000000000000000000..b1e7a23b6f52d9ae34fb50f70e117c23ae1c81bf --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/_test_st_common.py @@ -0,0 +1,66 @@ +# mypy: allow-untyped-defs + +import copy +import random +import torch +from torch.distributed._shard import sharded_tensor + +from torch.distributed._shard.sharding_spec import ( + ChunkShardingSpec, +) + +PLACEMENTS = [ + "rank:0/cuda:0", + "rank:1/cuda:1", + "rank:2/cuda:2", + "rank:3/cuda:3", +] + +DEFAULT_GPU_NUM = 4 + + +def _chunk_sharding_specs_list_for_test(sharding_dims, seed=0): + spec_list = [] + for i in range(len(sharding_dims)): + random.Random(seed + i).shuffle(PLACEMENTS) + spec_list.append( + ChunkShardingSpec( + dim=sharding_dims[i], + placements=copy.deepcopy(PLACEMENTS), + ) + ) + return spec_list + +class MyShardedModel2(torch.nn.Module): + def __init__( + self, + spec=None, + group=None, + init_rrefs=True + ) -> None: + super().__init__() + if spec is not None: + self.sharded_tensor2 = sharded_tensor.rand( + spec, 10, 20, process_group=group, init_rrefs=init_rrefs + ) + else: + self.sharded_tensor2 = None + self.random_tensor2 = 
class MyShardedModel1(torch.nn.Module):
    """Module holding an optional sharded tensor, a dense parameter, and a
    sharded submodule; exercises nested sharded-tensor handling in tests."""

    def __init__(
        self,
        spec=None,
        group=None,
        init_rrefs=True
    ) -> None:
        super().__init__()
        if spec is None:
            self.sharded_tensor1 = None
        else:
            self.sharded_tensor1 = sharded_tensor.rand(
                spec, 10, 20, process_group=group, init_rrefs=init_rrefs
            )
        self.random_tensor1 = torch.nn.Parameter(torch.rand(2, 2))
        self.submodule = MyShardedModel2(spec, group, init_rrefs)
def with_temp_dir(
    func: Optional[Callable] = None,
) -> Optional[Callable]:
    """
    Decorator that provides ``self.temp_dir`` — a temp directory agreed on
    by all ranks — for a distributed checkpoint test, removing it afterwards.

    When a process group is initialized, rank 0 creates the directory and
    broadcasts its path so every rank sees the same location; otherwise a
    purely local directory is used.
    """
    assert func is not None

    @wraps(func)
    def wrapper(self, *args: Tuple[object], **kwargs: Dict[str, Any]) -> None:
        if dist.is_initialized():
            # Only rank 0 creates the directory; other ranks receive the
            # path via broadcast so all ranks agree on it.
            if dist.get_rank() == 0:
                temp_dir = tempfile.mkdtemp()
                print(f"Using temp directory: {temp_dir}")
            else:
                temp_dir = ""
            object_list = [temp_dir]

            # os.sync flushes filesystem state around the broadcast so the
            # directory is visible (e.g. on shared filesystems).
            os.sync()
            dist.broadcast_object_list(object_list)
            self.temp_dir = object_list[0]
            os.sync()
        else:
            temp_dir = tempfile.mkdtemp()
            print(f"No process group initialized, using temp directory: {temp_dir}")
            self.temp_dir = temp_dir

        try:
            func(self, *args, **kwargs)
        finally:
            # The original if/else here had byte-identical branches, so the
            # removal is unconditional; ignore_errors tolerates the delete
            # racing across ranks.
            shutil.rmtree(self.temp_dir, ignore_errors=True)

    return wrapper
0000000000000000000000000000000000000000..981c8e59580649a838c924b87d7e1478ef00de91 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/distributed_test.py @@ -0,0 +1,10423 @@ +# mypy: allow-untyped-defs + +import copy +import json +import itertools +import math +import os +import random +import sys +import tempfile +import time +from collections import namedtuple, OrderedDict, defaultdict +from contextlib import contextmanager, nullcontext +from dataclasses import dataclass +from datetime import timedelta +from functools import reduce +from typing import Union, NamedTuple, Callable, Any +import unittest +import numpy as np +import torch +import torch.cuda +import torch.distributed as dist +import torch.distributed.algorithms.model_averaging.averagers as averagers +import torch.distributed.algorithms.model_averaging.hierarchical_model_averager as hierarchicalSGD +import torch.distributed.algorithms.model_averaging.utils as model_averaging_utils +import torch.nn as nn +import torch.nn.functional as F +from torch._utils_internal import TEST_MASTER_ADDR as MASTER_ADDR +from torch._utils_internal import TEST_MASTER_PORT as MASTER_PORT +from torch.utils._python_dispatch import TorchDispatchMode +from torch.autograd import DeviceType +from torch.cuda.amp import GradScaler, autocast + +from torch.distributed.algorithms.ddp_comm_hooks import ( + post_localSGD_hook as post_localSGD, + powerSGD_hook as powerSGD, + default_hooks as default, + quantization as quantization_hooks, +) +from torch.distributed.optim import _apply_optimizer_in_backward + +from torch.distributed.distributed_c10d import ( + get_world_size, + _get_default_group, + _get_pg_config, +) +from torch.distributed.utils import ( + _verify_param_shape_across_processes, + _sync_module_states, +) +from torch.profiler import ( + ExecutionTraceObserver, + ProfilerActivity, +) + +from torch.nn.parallel import DistributedDataParallel +from torch.nn.parallel.distributed 
class Foo:
    """Attribute bag whose equality compares every attribute present on
    ``self`` (tensors via ``torch.equal``); attributes only on ``other``
    are deliberately ignored."""

    def __init__(self, x):
        # x may be a tensor or an int.
        self.x = x

    def __eq__(self, other):
        def same(lhs, rhs):
            if isinstance(lhs, torch.Tensor):
                return torch.equal(lhs, rhs)
            return lhs == rhs

        return all(
            same(value, other.__dict__[attr])
            for attr, value in self.__dict__.items()
        )
def get_profiling_event(event_name, profiler, dedup_gpu_user_annotation=False):
    """Return profiler events whose name starts or ends with ``event_name``.

    Works with both ``torch.profiler.profile`` and the autograd profiler.
    With ``dedup_gpu_user_annotation`` set, CUDA-device duplicates of user
    annotations are filtered out.
    """
    if isinstance(profiler, torch.profiler.profile):
        event_list = profiler.events()
    else:
        event_list = profiler.function_events

    def keep(event):
        matches = event.name.endswith(event_name) or event.name.startswith(event_name)
        if not matches:
            return False
        return not dedup_gpu_user_annotation or event.device_type != DeviceType.CUDA

    return [event for event in event_list if keep(event)]
+ with open(trace_file) as f: + events = json.load(f)["traceEvents"] + print(f"Trace saved to {trace_file}") + + # Comment to debug + os.remove(trace_file) + + return [e for e in events if e.get("name") == "record_param_comms"] + +# Base error message substring on unfinished reductions. +ddp_prev_reduction_unfinished_str = ( + "Expected to have finished reduction in the prior iteration" +) +# Error message substring when find_unused_parameters=True has not been passed +ddp_recommend_find_unused_params_str = ( + "passing the keyword argument `find_unused_parameters=True`" +) +# Error message substring when find_unused_parameters=True is enabled +ddp_find_unused_params_enabled_str = "Since `find_unused_parameters=True` is enabled" +# Error message substring for possibility of not all model outputs being used +# in loss computation +ddp_outputs_not_used_in_loss_str = ( + "`forward` function outputs participate in calculating loss" +) +# Error message substring suggesting to use TORCH_DISTRIBUTED_DEBUG +ddp_suggest_debug_mode_str = ( + "set the environment variable TORCH_DISTRIBUTED_DEBUG to either INFO or DETAIL" +) + + +class DDPUnevenTestInput(NamedTuple): + name: str + model: nn.Module + inp: Union[torch.tensor, tuple] + sync_interval: int + throw_on_early_termination: bool = False + hook: Callable = None + state: Any = None + + +class _FC2(nn.Module): + def __init__(self) -> None: + super().__init__() + self.fc = nn.Linear(10, 50, bias=True) + self.fc.bias.requires_grad = False + + def forward(self, x): + x = self.fc(x) + return x + + +class Net(nn.Module): + def __init__(self) -> None: + super().__init__() + self.fc1 = nn.Linear(2, 10, bias=False) + self.fc2 = _FC2() + self.fc3 = nn.Linear(50, 4, bias=False) + self.relu = nn.ReLU() + self.no_grad_param = nn.Parameter( + torch.tensor([2, 2]).long(), requires_grad=False + ) + + def forward(self, x): + x = self.relu(self.fc1(x)) + x = self.relu(self.fc2(x)) + x = self.fc3(x) + return F.softmax(x, dim=1) + + +class 
class BatchNormNet(nn.Module):
    """Small net with a BatchNorm1d between two linear layers.

    (N, 2) -> fc1 -> reshape to (-1, 4, 10) -> bn -> reshape to (-1, 40)
    -> fc2 -> softmax over 4 outputs.
    """

    def __init__(self, affine=True):
        super().__init__()
        self.fc1 = nn.Linear(2, 40, bias=False)
        self.bn = nn.BatchNorm1d(4, affine=affine)
        self.fc2 = nn.Linear(40, 4, bias=False)

    def forward(self, x):
        hidden = torch.reshape(self.fc1(x), (-1, 4, 10))
        normed = self.bn(hidden)
        flat = torch.reshape(normed, (-1, 40))
        return F.softmax(self.fc2(flat), dim=1)
+ """ + + def __init__(self, rank, diff_num_params=False): + super().__init__() + embedding_dim = 500 if diff_num_params or rank == 0 else 50 + self.embedding = nn.Embedding(num_embeddings=10, embedding_dim=embedding_dim) + self.lin = nn.Linear(embedding_dim, 1) + if diff_num_params: + self.lin2 = nn.Linear(1, 1, bias=False) + + def forward(self, x): + x = self.embedding(x) + return self.lin(x) + + +class ControlFlowToyModel(nn.Module): + def __init__(self) -> None: + super().__init__() + self.lin1 = nn.Linear(10, 10, bias=False) + self.lin2 = nn.Linear(10, 10, bias=False) + + def forward(self, x): + # Second layer is used dependent on input x. + use_second_layer = torch.equal(x, torch.ones(20, 10, device=x.device)) + if use_second_layer: + return self.lin2(F.relu(self.lin1(x))) + else: + return F.relu(self.lin1(x)) + + +DDP_NET = Net() +BN_NET = BatchNormNet() +BN_NET_NO_AFFINE = BatchNormNet(affine=False) +ONLY_SBN_NET = nn.SyncBatchNorm(2, momentum=0.99) + + +def get_timeout(test_id): + test_name = test_id.split(".")[-1] + if test_name in CUSTOMIZED_TIMEOUT: + return CUSTOMIZED_TIMEOUT[test_name] + else: + return DEFAULT_TIMEOUT + + +default_pg_timeout = 60 + +CUSTOM_PG_TIMEOUT = { + # This test runs slowly and needs additional time to complete, otherwise can + # be taken down by TORCH_NCCL_ASYNC_ERROR_HANDLING + "test_ddp_uneven_inputs": 300, + # This test has a short timeout since it tests being taken down by + # TORCH_NCCL_ASYNC_ERROR_HANDLING which we want to happen quickly. + "test_ddp_model_diff_across_ranks": 5, + # This test has a short timeout since it tests being taken down by + # TORCH_NCCL_ASYNC_ERROR_HANDLING which we want to happen quickly. 
+ "test_ddp_has_finalized": 5, +} + +def require_backend_is_available(backends): + def check(backend): + if backend == dist.Backend.GLOO: + return dist.is_gloo_available() + if backend == dist.Backend.NCCL: + return dist.is_nccl_available() + if backend == dist.Backend.MPI: + return dist.is_mpi_available() + if backend == dist.Backend.UCC: + return dist.is_ucc_available() + if backend in DistTestCases.backend_feature["plugin"]: + return True + return False + + if BACKEND not in backends: + return skip_but_pass_in_sandcastle( + f"Test requires backend {BACKEND} to be one of {backends}" + ) + + if not check(dist.Backend(BACKEND)): + return skip_but_pass_in_sandcastle( + f"Test requires backend {BACKEND} to be available" + ) + return lambda func: func + + +def require_world_size(world_size): + if int(os.environ["WORLD_SIZE"]) < world_size: + return skip_but_pass_in_sandcastle( + "Test requires world size of %d" % world_size + ) + return lambda func: func + + +@contextmanager +def _lock(): + TEMP_DIR = os.environ["TEMP_DIR"] + lockfile = os.path.join(TEMP_DIR, "lockfile") + with open(lockfile, "w") as lf: + try: + if sys.platform == "win32": + msvcrt.locking(lf.fileno(), msvcrt.LK_RLCK, 1) + yield + else: + fcntl.flock(lf.fileno(), fcntl.LOCK_EX) + yield + finally: + if sys.platform == "win32": + msvcrt.locking(lf.fileno(), msvcrt.LK_UNLCK, 1) + else: + fcntl.flock(lf.fileno(), fcntl.LOCK_UN) + lf.close() + + +@contextmanager +def _rank_temp_file(): + if dist.get_rank() == 0: + fd, name = tempfile.mkstemp() + os.close(fd) + else: + name = None + object_list = [name] + dist.broadcast_object_list(object_list) + name = object_list[0] + try: + yield name + finally: + if dist.get_rank() == 0: + os.remove(name) + + +def _build_tensor(size, value=None, dtype=torch.float, device_id=None): + if value is None: + value = size + if device_id is None: + return torch.empty(size, size, size, dtype=dtype).fill_(value) + else: + return torch.empty(size, size, size, 
dtype=dtype).fill_(value).cuda(device_id) + + +def _build_multidim_tensor(dim, dim_size, value=None, dtype=torch.float): + if value is None: + value = dim + return torch.empty(size=[dim_size for _ in range(dim)], dtype=dtype).fill_(value) + + +def _create_autograd_profiler(): + return torch.autograd.profiler.profile(record_shapes=True) + + +def _create_torch_profiler(): + return torch.profiler.profile( + activities=[ + torch.profiler.ProfilerActivity.CPU, + ], + record_shapes=True, + ) + + +class Barrier: + barrier_id = 0 + + @classmethod + def init(cls): + cls.barrier_id = 0 + barrier_dir = os.path.join(os.environ["TEMP_DIR"], "barrier") + for f_name in os.listdir(barrier_dir): + os.unlink(os.path.join(barrier_dir, f_name)) + + @classmethod + def sync(cls, wait_for=None, timeout=10): + if wait_for is None: + wait_for = dist.get_world_size() + cls.barrier_id += 1 + barrier_dir = os.path.join(os.environ["TEMP_DIR"], "barrier") + pid = str(os.getpid()) + barrier_file = os.path.join(barrier_dir, pid) + with _lock(): + with open(barrier_file, "w") as f: + f.write(str(cls.barrier_id)) + + start_time = time.time() + while True: + arrived = 0 + with _lock(): + for f_name in os.listdir(barrier_dir): + with open(os.path.join(barrier_dir, f_name)) as f: + data = f.read() + if int(data) >= cls.barrier_id: + arrived += 1 + if arrived == wait_for: + break + + if time.time() - start_time > timeout: + raise RuntimeError("barrier timeout") + time.sleep(0.1) + + +class TestDistBackend(MultiProcessTestCase): + @classmethod + def setUpClass(cls): + os.environ["MASTER_ADDR"] = str(MASTER_ADDR) + # Not setting MASTER_PORT and get a random free port + super().setUpClass() + + def setUp(self): + super().setUp() + # initialize temp directories + initialize_temp_directories() + # initialize Barrier + Barrier.init() + # Skip return code checking for following tests as they are expected to + # crash a process due to TORCH_NCCL_ASYNC_ERROR_HANDLING. 
    @classmethod
    def _run(cls, rank, test_name, file_name, pipe, **kwargs):
        """Per-process entry point: initialize the process group, run one
        named test, then tear down. Exits with a skip code when the backend
        cannot run here (no CUDA for nccl, too few GPUs, backend missing)."""
        if BACKEND == "nccl" and not torch.cuda.is_available():
            sys.exit(TEST_SKIPS["no_cuda"].exit_code)
        self = cls(test_name)
        self.rank = rank
        self.file_name = file_name

        if torch.cuda.is_available() and torch.cuda.device_count() < int(
            self.world_size
        ):
            sys.exit(TEST_SKIPS[f"multi-gpu-{self.world_size}"].exit_code)
        try:
            # Some tests need a custom (shorter/longer) process-group timeout.
            pg_timeout_seconds = CUSTOM_PG_TIMEOUT.get(test_name, default_pg_timeout)
            timeout = timedelta(seconds=pg_timeout_seconds)
            dist.init_process_group(
                init_method=self.init_method,
                backend=BACKEND,
                world_size=int(self.world_size),
                rank=self.rank,
                timeout=timeout,
            )
        except RuntimeError as e:
            # "recompile" in the message signals the backend is not built in.
            if "recompile" in e.args[0]:
                sys.exit(TEST_SKIPS["backend_unavailable"].exit_code)

            raise

        # Execute barrier prior to running test to ensure that every process
        # has finished initialization and that the following test
        # immediately exiting due to a skip doesn't cause flakiness.
        self._barrier()

        self.run_test(test_name, pipe)
        self._barrier()
        dist.destroy_process_group()
        sys.exit(0)
+ @property + def world_size(self): + return os.environ["WORLD_SIZE"] + + +class DistributedTest: + class _DistTestBase: + def _barrier(self, *args, **kwargs): + Barrier.sync(*args, **kwargs) + + def _init_group_test(self, **kwargs): + group = [1, 2] + group_id = dist.new_group(group, **kwargs) + rank = dist.get_rank() + if rank not in group: + return ([], None, rank) + + return (group, group_id, rank) + + def _init_full_group_test(self, **kwargs): + group = list(range(0, dist.get_world_size())) + group_id = dist.new_group(**kwargs) + rank = dist.get_rank() + return (group, group_id, rank) + + def _init_global_test(self): + group = list(range(0, dist.get_world_size())) + group_id = dist.group.WORLD + rank = dist.get_rank() + return (group, group_id, rank) + + def _verify_buffers_equal(self, m1, m2): + # verify buffers across models + m1_buf_dict = dict(m1.module.named_buffers()) + for name, buf in m2.module.named_buffers(): + self.assertEqual(buf, m1_buf_dict[name]) + + # Verify buffers across ranks. 
+ m1_buffers = list(m1.buffers()) + m2_buffers = list(m2.buffers()) + for (buf1, buf2) in zip(m1_buffers, m2_buffers): + gathered_bufs = [ + torch.empty_like(buf1) for _ in range(dist.get_world_size()) + ] + dist.all_gather(gathered_bufs, buf1) + gathered_bufs_m2 = [ + torch.empty_like(buf2) for _ in range(dist.get_world_size()) + ] + for b in gathered_bufs: + self.assertEqual(b, buf1) + dist.all_gather(gathered_bufs_m2, buf2) + for b in gathered_bufs_m2: + self.assertEqual(b, buf2) + + def _sanity_check_profiler_nccl_meta(self, nccl_meta_events): + """Torch profiler includes nccl metadata in an inserted operator called "record_param_comms" + We test for basic fields in this profiler event that correspond to the nccl communication + collectives""" + per_coll_meta = defaultdict(list) + for e in nccl_meta_events: + args = e.get("args", {}) + collname = args.get("Collective name", "") + self.assertNotEqual(collname, "") + self.assertNotEqual(args.get("dtype", ""), "") + + per_coll_meta[collname].append(args) + if collname in {"wait"}: + continue + + self.assertEqual(args["Process Group Description"], "default_pg") + self.assertNotEqual(args["Process Group Ranks"], "") + + self.assertGreaterEqual(args.get("In msg nelems", -1), 0) + self.assertGreaterEqual(args.get("Out msg nelems", -1), 0) + self.assertGreaterEqual(args.get("Group size", -1), 0) + self.assertGreaterEqual(args.get("Global rank start", -1), 0) + self.assertGreaterEqual(args.get("Global rank stride", -1), 0) + + # print(per_coll_meta) + return per_coll_meta + + def test_dump_DDP_relevant_env_vars(self): + with captured_output() as (out, _): + _dump_DDP_relevant_env_vars() + lines = out.getvalue().splitlines() + + def format_line(var): + return f"env:{var}={os.environ[var] if var in os.environ else 'N/A'}" + + # Check relevant env vars + vars = [ + "MASTER_ADDR", + "MASTER_PORT", + "WORLD_SIZE", + "NCCL_TOPO_DUMP_FILE", # N/A + "TORCH_NCCL_ASYNC_ERROR_HANDLING", + ] + for var in vars: + line = 
format_line(var) + self.assertIn(line, lines) + # Check irrelevant env vars + vars = [ + "xxx", + "yyy", + "zzz", + ] + for var in vars: + line = format_line(var) + self.assertNotIn(line, lines) + + # GET RANK + def test_get_rank(self): + test_dir = os.path.join(os.environ["TEMP_DIR"], "test_dir") + pid = str(os.getpid()) + num_processes = dist.get_world_size() + with open(os.path.join(test_dir, pid), "w") as f: + f.write(str(dist.get_rank())) + + self._barrier() + + all_ranks = set() + for f_name in os.listdir(test_dir): + with open(os.path.join(test_dir, f_name)) as f: + all_ranks.add(int(f.read())) + self.assertEqual(len(all_ranks), num_processes) + + self._barrier() + + if dist.get_rank() == 0: + for f_name in os.listdir(test_dir): + os.unlink(os.path.join(test_dir, f_name)) + + self._barrier() + + def test_get_backend(self): + if dist.get_world_size() > 2: + group = [1, 2] + else: + group = [0, 1] + group_id = dist.new_group(group) + backend_str = BACKEND.lower() + self.assertEqual(dist.get_backend(), backend_str) + if dist.get_rank() in group: + self.assertEqual(dist.get_backend(group_id), backend_str) + else: + with self.assertRaisesRegex( + ValueError, "Invalid process group specified" + ): + dist.get_backend(group_id) + + def test_Backend_enum_class(self): + # test parsing + backend = BACKEND.lower() + self.assertEqual(dist.Backend(BACKEND.upper()), backend) + self.assertEqual(dist.Backend(BACKEND), backend) + with self.assertRaises(ValueError): + dist.Backend(None) + with self.assertRaises(ValueError): + dist.Backend(3) + with self.assertRaises(ValueError): + dist.Backend(["gloo"]) + + # Test destroy + def test_destroy_group(self): + if dist.get_world_size() > 2: + group = [1, 2] + else: + group = [0, 1] + group_id = dist.new_group(group) + self._barrier() + dist.destroy_process_group(group_id) + + # Test get rank and size of group + def test_get_rank_size_group(self): + if dist.get_world_size() > 2: + group = [1, 2] + else: + group = [0, 1] + group_id = 
dist.new_group(group) + if dist.get_rank() in group: + self.assertEqual(dist.get_world_size(group_id), 2) + self.assertTrue(dist.get_rank(group_id) in list(range(2))) + else: + self.assertEqual(dist.get_world_size(group_id), -1) + self.assertEqual(dist.get_rank(group_id), -1) + + # Test destroy full groups + def test_destroy_full_group(self): + _, group_id, _ = self._init_full_group_test() + self._barrier() + dist.destroy_process_group(group_id) + + # Test get rank and size of full group + def test_get_rank_size_full_group(self): + _, group_id, _ = self._init_full_group_test() + self.assertEqual(dist.get_world_size(group_id), dist.get_world_size()) + self.assertEqual(dist.get_rank(group_id), dist.get_rank()) + + def _test_barrier_timeout(self, group_id, timeout): + local_rank = dist.get_rank(group_id) + + # Only execute barrier on rank == 0, causing it to timeout + if local_rank == 0: + expected_time = time.time() + timeout.total_seconds() + # In debug mode, we execute a monitored_barrier before the + # collective, so assert on that. + if dist.get_debug_level() == dist.DebugLevel.DETAIL: + exception_ctx = self.assertRaisesRegex( + Exception, "failed to pass monitoredBarrier" + ) + else: + exception_ctx = self.assertRaisesRegex( + Exception, " (Timed out|closed|timeout) " + ) + with exception_ctx: + dist.barrier(group_id) + self.assertGreaterAlmostEqual(time.time(), expected_time, delta=0.1) + else: + pass + + @skip_but_pass_in_sandcastle_if( + BACKEND != "gloo", "Only gloo backend supports timeouts" + ) + @skip_but_pass_in_sandcastle_if( + not INIT_METHOD.startswith("file://"), + "Requires file:// initialization method. " + + "Both tcp:// and env:// rely on the TCP store for which " + "reinitialization has proven racy.", + ) + def test_barrier_timeout_global(self): + dist.destroy_process_group() + + # Explicitly pass world size to the barrier because we've + # just destroyed any state in torch.distributed. 
+ self._barrier(wait_for=int(os.environ["WORLD_SIZE"])) + + # Reinitialize global process group + timeout = timedelta(seconds=1) + dist.init_process_group( + init_method=INIT_METHOD, + backend=BACKEND, + world_size=int(os.environ["WORLD_SIZE"]), + rank=self.rank, + timeout=timeout, + ) + self._test_barrier_timeout(dist.group.WORLD, timeout) + + @skip_if_small_worldsize + @skip_but_pass_in_sandcastle_if( + BACKEND != "gloo", "Only gloo backend supports timeouts" + ) + def test_barrier_timeout_group(self): + timeout = timedelta(seconds=5) + _, group_id, _ = self._init_group_test(timeout=timeout) + if group_id is not None: + self._test_barrier_timeout(group_id, timeout) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "gloo", "Only gloo backend supports timeouts" + ) + def test_barrier_timeout_full_group(self): + timeout = timedelta(seconds=1) + _, group_id, _ = self._init_full_group_test(timeout=timeout) + if group_id is not None: + self._test_barrier_timeout(group_id, timeout) + + # This test helper can only be used when using the Gloo or NCCL backend + # **and** both the Gloo and NCCL backends are available. + # See the @skip annotations below. + def _test_group_override_backend(self, initializer): + if BACKEND == "gloo": + new_backend = "nccl" + elif BACKEND == "nccl": + new_backend = "gloo" + elif BACKEND in DistTestCases.backend_feature["plugin"]: + new_backend = "gloo" + + group, group_id, rank = initializer(backend=new_backend) + if group_id is None: + return + + if new_backend == "gloo": + self.assertTrue(group_id._get_backend_name(), "gloo") + if new_backend == "nccl": + self.assertTrue(group_id._get_backend_name(), "nccl") + + self.assertEqual(rank, group[dist.get_rank(group_id)]) + self.assertEqual(len(group), dist.get_world_size(group_id)) + + # Pin device (so we avoid NCCL race conditions/deadlocks). + group_rank = dist.get_rank(group_id) + torch.cuda.set_device(group_rank) + + # Run broadcast of CUDA tensor (so it works for both Gloo and NCCL). 
+ tensor = _build_tensor(2, value=group_rank).cuda() + dist.broadcast(tensor, src=group[0], group=group_id) + self.assertEqual(_build_tensor(2, value=0), tensor.to("cpu")) + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @require_world_size(3) + @skip_if_lt_x_gpu(2) + def test_backend_group(self): + self._test_group_override_backend(self._init_group_test) + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + @unittest.skipIf(BACKEND == "ucc", "broken, see https://github.com/pytorch/pytorch/pull/113620") + def test_backend_full_group(self): + self._test_group_override_backend(self._init_full_group_test) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["subgroup"], + f"The {BACKEND} backend does not support creating subgroups on CUDA devices", + ) + @require_world_size(4) + @skip_if_lt_x_gpu(2) + def test_new_subgroups(self): + subgroup_size = 2 + cur_subgroup, subgroups = dist.new_subgroups(subgroup_size) + + world_size = dist.get_world_size() + self.assertEqual(cur_subgroup.size(), subgroup_size) + self.assertEqual(len(subgroups), world_size / subgroup_size) + self.assertFalse(dist._rank_not_in_group(cur_subgroup)) + + for subgroup in subgroups: + dist.destroy_process_group(subgroup) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["subgroup"], + f"The {BACKEND} backend does not support creating subgroups on CUDA devices", + ) + @skip_if_no_gpu + def test_new_subgroups_group_size_exceeds_world_size(self): + with self.assertRaisesRegex(ValueError, "must not exceed"): + dist.new_subgroups(100) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["subgroup"], + f"The {BACKEND} backend does not support creating subgroups on CUDA devices", + ) + @require_world_size(4) + @skip_if_lt_x_gpu(4) + def test_new_subgroups_world_size_not_divisible_by_group_size(self): + with self.assertRaisesRegex( + ValueError, 
"The world size must be divisible by 'group_size'" + ): + dist.new_subgroups(3) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["subgroup"], + f"The {BACKEND} backend does not support creating subgroups on CUDA devices", + ) + @require_world_size(4) + @skip_if_lt_x_gpu(4) + def test_new_subgroups_by_enumeration(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + device_id = rank_to_GPU[rank][0] + cur_subgroup, subgroups = dist.new_subgroups_by_enumeration( + ranks_per_subgroup_list=[[0, 2], [1, 3]] + ) + if device_id >= 4: + self.assertIsNone(cur_subgroup) + else: + self.assertEqual(cur_subgroup.size(), 2) + self.assertEqual(len(subgroups), 2) + if device_id == 0 or device_id == 2: + self.assertEqual(cur_subgroup, subgroups[0]) + else: + self.assertEqual(cur_subgroup, subgroups[1]) + + for subgroup in subgroups: + dist.destroy_process_group(subgroup) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["subgroup"], + f"The {BACKEND} backend does not support creating subgroups on CUDA devices", + ) + @require_world_size(4) + @skip_if_lt_x_gpu(4) + def test_new_subgroups_by_enumeration_input_rank_exceeds_world_size(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + device_id = rank_to_GPU[rank][0] + world_size = get_world_size(group_id) + + with self.assertRaisesRegex( + RuntimeError, + "The new group's rank should be within the world_size set by init_process_group", + ): + dist.new_subgroups_by_enumeration( + ranks_per_subgroup_list=[[0, 1], [world_size, 2]] + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["subgroup"], + f"The {BACKEND} backend does not support creating subgroups on CUDA devices", + ) + @skip_if_no_gpu + def test_new_subgroups_by_enumeration_negative_input_rank(self): + group, group_id, rank = 
self._init_global_test() + + with self.assertRaisesRegex( + ValueError, + "The new group's rank should be within the world_size set by init_process_group", + ): + dist.new_subgroups_by_enumeration( + ranks_per_subgroup_list=[[-1, -2], [-3, -4]] + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["subgroup"], + f"The {BACKEND} backend does not support creating subgroups on CUDA devices", + ) + @require_world_size(4) + @skip_if_lt_x_gpu(4) + def test_new_subgroups_overlap_not_allowed(self): + with self.assertRaisesRegex( + ValueError, "Rank 1 has appeared in both subgroup" + ): + dist.new_subgroups_by_enumeration( + ranks_per_subgroup_list=[[0], [1, 2], [1, 3]] + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["subgroup"], + f"The {BACKEND} backend does not support creating subgroups on CUDA devices", + ) + @skip_if_lt_x_gpu(2) + def test_average_parameters(self): + rank = dist.get_rank() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + device_id = rank_to_GPU[rank][0] + + model = nn.Sequential( + nn.Conv2d(3, 3, kernel_size=3, padding=1), + nn.ReLU(), + nn.Linear(1, 5, bias=False), + ).cuda(device_id) + # Test global model averaging + for p in model.parameters(): + p.data = torch.ones_like(p.data) + model_averaging_utils.average_parameters( + params=model.parameters(), process_group=None + ) + # Every element will be the same as the input. + for p in model.parameters(): + self.assertEqual(p.data, torch.ones_like(p.data)) + + # Test partial model averaging + for p in model.parameters(): + p.data = torch.ones_like(p.data) * rank + group_nccl = dist.new_group(ranks=[0, 1], backend="nccl") + model_averaging_utils.average_parameters( + params=model.parameters(), process_group=group_nccl + ) + if not dist._rank_not_in_group(group_nccl): + # Every element on device 0 or 1 should be the average of 0 and 1, i.e., 0.5. 
+ for p in model.parameters(): + self.assertEqual(p.data, torch.ones_like(p.data) * 0.5) + else: + # Every element on device not in the subgroup should remain the same. + for p in model.parameters(): + self.assertEqual(p.data, torch.ones_like(p.data) * rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["subgroup"], + f"The {BACKEND} backend does not support creating subgroups on CUDA devices", + ) + @skip_if_lt_x_gpu(2) + def test_periodic_model_averager(self): + rank = dist.get_rank() + world_size = dist.get_world_size() + rank_to_GPU = init_multigpu_helper(world_size, BACKEND) + device_id = rank_to_GPU[rank][0] + + model = nn.Linear(1, 5, bias=False).cuda(device_id) + param = next(model.parameters()) + tensor = torch.ones_like(param.data) * rank + expected_avg_tensor = ( + torch.ones_like(param.data) * sum(range(world_size)) / world_size + ) + period = 4 + for warmup_steps in [12, 13, 14, 15]: + averager = averagers.PeriodicModelAverager( + period=period, warmup_steps=warmup_steps + ) + for step in range(0, 20): + # Reset the parameters at every step. + param.data = copy.deepcopy(tensor) + for params in model.parameters(): + # mock grad + params.grad = torch.ones_like(param.data) + averager.average_parameters(model.parameters()) + if step >= warmup_steps and (step - warmup_steps) % period == 0: + self.assertEqual(param.data, expected_avg_tensor) + else: + # No model averaging, so the parameters are not updated. 
+ self.assertEqual(param.data, tensor) + + @skip_if_lt_x_gpu(2) + def test_periodic_model_averager_param_group(self): + rank = dist.get_rank() + world_size = dist.get_world_size() + rank_to_GPU = init_multigpu_helper(world_size, BACKEND) + device_id = rank_to_GPU[rank][0] + + model = nn.Linear(1, 5, bias=False).cuda(device_id) + param = next(model.parameters()) + opt = torch.optim.SGD(model.parameters(), lr=0.1) + + period = 4 + for warmup_steps in [12, 13, 14, 15]: + averager = averagers.PeriodicModelAverager( + period=period, warmup_steps=warmup_steps + ) + for step in range(0, 20): + # Reset the parameters at every step. + for param_group in opt.param_groups: + for params in param_group["params"]: + # mock grad + params.grad = torch.ones_like(param.data) * rank + params.data = torch.ones_like(param.data) * rank + averager.average_parameters(opt.param_groups) + if step >= warmup_steps and (step - warmup_steps) % period == 0: + for param_group in opt.param_groups: + for params in param_group["params"]: + if params.grad is None: + continue + self.assertEqual( + param.data, + torch.ones_like(param.data) + * sum(range(world_size)) + / world_size, + ) + else: + # No model averaging, so the parameters are not updated. 
+ for param_group in opt.param_groups: + for params in param_group["params"]: + if params.grad is None: + continue + self.assertEqual( + param.data, torch.ones_like(param.data) * rank + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["subgroup"], + f"The {BACKEND} backend does not support creating subgroups on CUDA devices", + ) + @skip_if_lt_x_gpu(2) + def test_1_level_hierarchical_model_averager_equivalent_to_periodic_model_averager( + self, + ): + rank = dist.get_rank() + world_size = dist.get_world_size() + rank_to_GPU = init_multigpu_helper(world_size, BACKEND) + device_id = rank_to_GPU[rank][0] + + model = nn.Linear(1, 5, bias=False).cuda(device_id) + param = next(model.parameters()) + tensor = torch.ones_like(param.data) * rank + expected_avg_tensor = ( + torch.ones_like(param.data) * sum(range(world_size)) / world_size + ) + period = 4 + for warmup_steps in [12, 13, 14, 15]: + averager = hierarchicalSGD.HierarchicalModelAverager( + # Run the global averaging at a period of 4, + # which is equivalent to the above periodic model averaging test case. + period_group_size_dict=OrderedDict([(period, world_size)]), + warmup_steps=warmup_steps, + ) + + averager = averagers.PeriodicModelAverager( + period=period, warmup_steps=warmup_steps + ) + for step in range(0, 20): + # Reset the parameters at every step. + param.data = copy.deepcopy(tensor) + for params in model.parameters(): + # mock grad + params.grad = torch.ones_like(param.data) + averager.average_parameters(model.parameters()) + if step >= warmup_steps and (step - warmup_steps) % period == 0: + self.assertEqual(param.data, expected_avg_tensor) + else: + # No model averaging, so the parameters are not updated. 
+ self.assertEqual(param.data, tensor) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["subgroup"], + f"The {BACKEND} backend does not support creating subgroups on CUDA devices", + ) + @require_world_size(4) + @skip_if_lt_x_gpu(4) + def test_3_level_hierarchical_model_averager(self): + rank = dist.get_rank() + world_size = dist.get_world_size() + rank_to_GPU = init_multigpu_helper(world_size, BACKEND) + device_id = rank_to_GPU[rank][0] + + model = nn.Linear(1, 5, bias=False).cuda(device_id) + param = next(model.parameters()) + tensor = torch.ones_like(param.data) * rank + # Set up such a hierarchical model averaging as follows: + # after the first 10 warmup steps, + # run model averaging every 2 steps within each subgroup of size 2, + # run model averaging every 4 steps within each subgroup of size 3, + # and run the global model averaging every 8 steps. + # If there is a conflict in model averaging at a step, only run the highest-level model averaging. 
+ warmup_steps = 10 + subgroup_size1 = 2 + subgroup_avg_period1 = 2 + subgroup_size2 = 4 + subgroup_avg_period2 = 4 + global_avg_period = 8 + period_group_size_dict = OrderedDict( + [ + (subgroup_avg_period1, subgroup_size1), + (subgroup_avg_period2, subgroup_size2), + (global_avg_period, world_size), + ] + ) + averager = hierarchicalSGD.HierarchicalModelAverager( + period_group_size_dict=period_group_size_dict, warmup_steps=warmup_steps + ) + self.assertEqual(dist.get_pg_count(), len(period_group_size_dict)) + + subgroup1 = averager.period_process_group_dict[subgroup_avg_period1] + subgroup2 = averager.period_process_group_dict[subgroup_avg_period2] + real_group_ranks_res1 = _get_pg_config(subgroup1)['ranks'] + real_group_ranks_res2 = _get_pg_config(subgroup2)['ranks'] + + expect_group_ranks_res1 = ( + rank // subgroup_size1 * subgroup_size1 + + np.array(list(range(subgroup_size1))) + ).tolist() + expect_group_ranks_res2 = ( + rank // subgroup_size2 * subgroup_size2 + + np.array(list(range(subgroup_size2))) + ).tolist() + self.assertEqual(real_group_ranks_res1, expect_group_ranks_res1) + self.assertEqual(real_group_ranks_res2, expect_group_ranks_res2) + + expected_avg_tensor_within_subgroup1 = ( + torch.ones_like(param.data) + * sum(real_group_ranks_res1) + / subgroup_size1 + ) + expected_avg_tensor_within_subgroup2 = ( + torch.ones_like(param.data) + * sum(real_group_ranks_res2) + / subgroup_size2 + ) + expected_global_avg_tensor = ( + torch.ones_like(param.data) * sum(range(world_size)) / world_size + ) + for step in range(0, 25): + # Reset the parameters at every step. + param.data = copy.deepcopy(tensor) + for params in model.parameters(): + # mock grad + params.grad = torch.ones_like(param.data) + averager.average_parameters(model.parameters()) + if step == 16 or step == 24: + # Run global model averaging when `step` can be divided by 8. 
+ self.assertEqual(param.data, expected_global_avg_tensor) + elif step == 12 or step == 20: + # Run model averaging within subgroup when `step` can be divided by 4 but not by 8. + self.assertEqual(param.data, expected_avg_tensor_within_subgroup2) + elif step == 10 or step == 14 or step == 18 or step == 22: + # Run model averaging within subgroup when `step` can be divided by 2 but not by 4 or 8. + self.assertEqual(param.data, expected_avg_tensor_within_subgroup1) + else: + # No model averaging, so the parameters are not updated. + self.assertEqual(param.data, tensor) + + # Coalescing manager (sync mode) + @skip_if_no_gpu + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl" or IS_FBCODE or IS_SANDCASTLE, + "Coalescing manager currently tests with NCCL only; internal test flaky" + ) + def test_coalescing_manager(self): + self._barrier() + rank = dist.get_rank() + world_size = dist.get_world_size() + rank_to_GPU = init_multigpu_helper(world_size, BACKEND) + device_id = rank_to_GPU[rank][0] + torch.cuda.set_device(device_id) + num_colls = 2 + size_per_coll = 8 + small_tensors = [ + torch.ones(size_per_coll, device=device_id) for _ in range(num_colls) + ] + + with dist._coalescing_manager(): + for i in range(num_colls): + dist.all_reduce(small_tensors[i]) + + big_tensor = torch.ones(num_colls * size_per_coll, device=device_id) + dist.all_reduce(big_tensor) + + for i in range(num_colls): + self.assertEqual( + small_tensors[i], + big_tensor[i * size_per_coll : (i + 1) * size_per_coll] + ) + + self._barrier() + + # Coalescing manager (async mode) + @skip_if_no_gpu + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl" or IS_FBCODE or IS_SANDCASTLE, + "Coalescing manager currently tests with NCCL only; internal test flaky" + ) + def test_coalescing_manager_async(self): + self._barrier() + rank = dist.get_rank() + world_size = dist.get_world_size() + rank_to_GPU = init_multigpu_helper(world_size, BACKEND) + device_id = rank_to_GPU[rank][0] + 
torch.cuda.set_device(device_id) + num_colls = 2 + size_per_coll = 8 + small_tensors = [ + torch.ones(size_per_coll, device=device_id) for _ in range(num_colls) + ] + + with dist._coalescing_manager(async_ops=True) as cm: + for i in range(num_colls): + dist.all_reduce(small_tensors[i]) + cm.wait() + + big_tensor = torch.ones(num_colls * size_per_coll, device=device_id) + dist.all_reduce(big_tensor) + + for i in range(num_colls): + self.assertEqual( + small_tensors[i], + big_tensor[i * size_per_coll : (i + 1) * size_per_coll] + ) + + self._barrier() + + # NCCL Batch SEND RECV + @skip_if_no_gpu + @skip_but_pass_in_sandcastle_if(BACKEND != "nccl", "NCCL Batch Send Recv Only") + @requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv") + def test_batch_isend_irecv_nccl(self): + self._barrier() + rank = dist.get_rank() + world_size = dist.get_world_size() + rank_to_GPU = init_multigpu_helper(world_size, BACKEND) + device_id = rank_to_GPU[rank][0] + torch.cuda.set_device(device_id) + p2p_op_list = [] + recv_tensors = [None for _ in range(world_size)] + expected_tensors = [None for _ in range(world_size)] + + for val in ["1", "0"]: + os.environ["TORCH_NCCL_BLOCKING_WAIT"] = val + for src in range(0, world_size): + send_tensor = _build_tensor(rank + 1, device_id=device_id).fill_( + src + ) + recv_tensors[src] = _build_tensor( + src + 1, value=-1, device_id=device_id + ).fill_(-1) + expected_tensors[src] = _build_tensor( + src + 1, value=-1, device_id=device_id + ).fill_(rank) + recv_op = dist.P2POp(dist.irecv, recv_tensors[src], src) + p2p_op_list.append(recv_op) + send_op = dist.P2POp(dist.isend, send_tensor, src) + p2p_op_list.append(send_op) + + reqs = dist.batch_isend_irecv(p2p_op_list) + for req in reqs: + req.wait() + + for src in range(0, world_size): + self.assertEqual(recv_tensors[src], expected_tensors[src]) + + self._barrier() + + @skip_if_no_gpu + @skip_but_pass_in_sandcastle_if(BACKEND != "nccl", "NCCL Batch Send Recv Only") + 
@requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv") + def test_batch_isend_irecv_ring_exchange_nccl(self): + self._barrier() + rank = dist.get_rank() + world_size = dist.get_world_size() + rank_to_GPU = init_multigpu_helper(world_size, BACKEND) + device_id = rank_to_GPU[rank][0] + torch.cuda.set_device(device_id) + p2p_op_list = [] + + send_tensor = _build_tensor(world_size, device_id=device_id) + recv_tensor = _build_tensor(world_size, value=-1, device_id=device_id) + send_op = dist.P2POp(dist.isend, send_tensor, (rank + 1) % world_size) + recv_op = dist.P2POp( + dist.irecv, recv_tensor, (rank - 1 + world_size) % world_size + ) + reqs = dist.batch_isend_irecv([send_op, recv_op]) + for req in reqs: + req.wait() + + self._barrier() + + @skip_if_no_gpu + @skip_but_pass_in_sandcastle_if(BACKEND != "nccl", "NCCL Batch Send Recv Only") + @requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv") + def test_batch_isend_irecv_self_nccl(self): + self._barrier() + # Ensure the process group has been fully initialized (needed by + # the first sub-group batch_isend_irecv call) + dist.barrier() + rank = dist.get_rank() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + device_id = rank_to_GPU[rank][0] + p2p_op_list = [] + + if rank == 0: + send_tensor = _build_tensor(rank + 1, device_id=device_id) + recv_tensor = _build_tensor(rank + 1, value=-1, device_id=device_id) + recv_op = dist.P2POp(dist.irecv, recv_tensor, 0) + p2p_op_list.append(recv_op) + send_op = dist.P2POp(dist.isend, send_tensor, 0) + p2p_op_list.append(send_op) + + reqs = dist.batch_isend_irecv(p2p_op_list) + for req in reqs: + req.wait() + + self._barrier() + + @skip_if_no_gpu + @skip_if_small_worldsize + @skip_but_pass_in_sandcastle_if(BACKEND != "nccl", "NCCL Batch Send Recv Only") + @requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv") + def test_batch_isend_irecv_no_rank_zero_nccl(self): + self._barrier() + # Ensure the process group has been fully 
initialized (needed by + # the first sub-group batch_isend_irecv call) + dist.barrier() + rank = dist.get_rank() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + device_id = rank_to_GPU[rank][0] + torch.cuda.set_device(device_id) + p2p_op_list = [] + + if rank == 1: + peer = 2 + elif rank == 2: + peer = 1 + + if rank in [1, 2]: + send_tensor = _build_tensor(rank + 1, device_id=device_id) + recv_tensor = _build_tensor(peer + 1, value=-1, device_id=device_id) + recv_op = dist.P2POp(dist.irecv, recv_tensor, peer) + p2p_op_list.append(recv_op) + send_op = dist.P2POp(dist.isend, send_tensor, peer) + p2p_op_list.append(send_op) + + reqs = dist.batch_isend_irecv(p2p_op_list) + for req in reqs: + req.wait() + + self._barrier() + + # GLOO Batch SEND RECV CPU + @skip_but_pass_in_sandcastle_if(BACKEND != "gloo", "GLOO Batch Send Recv CPU") + def test_batch_isend_irecv_gloo(self): + self._barrier() + rank = dist.get_rank() + p2p_op_list = [] + + for src in range(0, dist.get_world_size()): + if src == rank: + continue + send_tensor = _build_tensor(rank + 1) + recv_tensor = _build_tensor(src + 1, value=-1) + recv_op = dist.P2POp(dist.irecv, recv_tensor, src) + p2p_op_list.append(recv_op) + send_op = dist.P2POp(dist.isend, send_tensor, src) + p2p_op_list.append(send_op) + + reqs = dist.batch_isend_irecv(p2p_op_list) + for req in reqs: + req.wait() + + self._barrier() + + # GLOO Batch SEND RECV CPU with provided tags + @skip_but_pass_in_sandcastle_if(BACKEND != "gloo", "GLOO Batch Send Recv CPU") + def test_batch_isend_irecv_gloo_tags(self): + self._barrier() + rank = dist.get_rank() + p2p_op_list = [] + + for src in range(0, dist.get_world_size()): + if src == rank: + continue + send_tensor = _build_tensor(rank + 1) + recv_tensor = _build_tensor(src + 1, value=-1) + recv_op = dist.P2POp(dist.irecv, recv_tensor, src, tag=src) + p2p_op_list.append(recv_op) + send_op = dist.P2POp(dist.isend, send_tensor, src, tag=rank) + p2p_op_list.append(send_op) + + reqs = 
dist.batch_isend_irecv(p2p_op_list) + for req in reqs: + req.wait() + + self._barrier() + + # NCCL Batch SEND RECV Op Error + @skip_but_pass_in_sandcastle_if(BACKEND != "nccl", "NCCL Batch Send Recv Only") + @requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv") + def test_batch_isend_irecv_op_err(self): + self._barrier() + rank = dist.get_rank() + if rank == 0: + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + device_id = rank_to_GPU[rank][0] + with self.assertRaisesRegex(ValueError, "^Invalid ``op``"): + send_tensor = _build_tensor(rank + 1, device_id=device_id) + send_op = dist.P2POp(dist.broadcast, send_tensor, 1) + dist.batch_isend_irecv([send_op]) + + # NCCL Batch SEND RECV p2p_op_list Error + @skip_but_pass_in_sandcastle_if(BACKEND != "nccl", "NCCL Batch Send Recv Only") + @requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv") + def test_batch_isend_irecv_op_list_err(self): + self._barrier() + rank = dist.get_rank() + if rank == 0: + with self.assertRaisesRegex(ValueError, "^Invalid ``p2p_op_list``"): + dist.batch_isend_irecv([1, 2]) + + # NCCL Batch SEND RECV Mixed Backend Error + @skip_but_pass_in_sandcastle_if(BACKEND != "nccl", "NCCL Batch Send Recv Only") + @requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv") + def test_batch_isend_irecv_mixed_backend_err(self): + self._barrier() + rank = dist.get_rank() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + device_id = rank_to_GPU[rank][0] + group_gloo = dist.new_group(ranks=[0, 1], backend="gloo") + group_nccl = dist.new_group(ranks=[0, 1], backend="nccl") + if rank == 0: + with self.assertRaisesRegex( + ValueError, "All ops need to use the same group" + ): + send_tensor = _build_tensor(rank + 1) + send_op_gloo = dist.P2POp(dist.isend, send_tensor, 1, group_gloo) + send_op_nccl = dist.P2POp(dist.isend, send_tensor, 1, group_nccl) + dist.batch_isend_irecv([send_op_gloo, send_op_nccl]) + + # NCCL SEND RECV + @skip_if_no_gpu + 
@skip_but_pass_in_sandcastle_if(BACKEND != "nccl", "NCCL Send Recv Only") + @requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv") + def _test_send_recv_nccl(self, profiler_ctx=None): + # TODO: now that nccl send/recv is supported, there does not seem to + # be a need to have nccl send/recv be tested separately. + rank = dist.get_rank() + world_size = dist.get_world_size() + rank_to_GPU = init_multigpu_helper(world_size, BACKEND) + device_id = rank_to_GPU[rank][0] + torch.cuda.set_device(device_id) + + tensor = _build_tensor(rank + 1, device_id=device_id) + profiler_cls = profiler_ctx if profiler_ctx is not None else nullcontext() + with profiler_cls as prof: + for src in range(0, world_size): + if src == rank: + # Send mode + for dst in range(0, world_size): + if dst == rank: + continue + dist.send(tensor, dst) + else: + # Recv mode + expected_tensor = _build_tensor(src + 1) + output_tensor = _build_tensor( + src + 1, value=-1, device_id=device_id + ) + dist.recv(output_tensor, src) + self.assertEqual(output_tensor, expected_tensor) + + self._barrier() + + if profiler_ctx is not None: + backend = dist.get_backend() + if backend in SEND_RECV_PROFILING_SUPPORTED_BACKENDS: + for event_name in [f"{backend}:send", f"{backend}:recv"]: + events = get_profiling_event(event_name, prof, dedup_gpu_user_annotation=True) + self.assertTrue(events) + # Event order is not deterministic, so simply assert their shape + # is found in the following list. 
+ expected_shapes = [ + [[rank + 1] * 3] for rank in range(dist.get_world_size()) + ] + for event in events: + self.assertTrue(event.input_shapes in expected_shapes) + + + @skip_if_no_gpu + @skip_but_pass_in_sandcastle_if(BACKEND != "nccl", "NCCL Send Recv Only") + @requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv") + def test_send_recv_nccl(self): + self._test_send_recv_nccl() + + @skip_if_no_gpu + @skip_but_pass_in_sandcastle_if(BACKEND != "nccl", "NCCL Send Recv Only") + @requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv") + def test_send_recv_nccl_autograd_profiler(self): + profiler_ctx = torch.autograd.profiler.profile(record_shapes=True) + self._test_send_recv_nccl(profiler_ctx) + + @skip_if_no_gpu + @skip_but_pass_in_sandcastle_if(BACKEND != "nccl", "NCCL Send Recv Only") + @requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv") + @skip_but_pass_in_sandcastle_if(IS_FBCODE, "Kineto in fbcode causes hang") + @skip_but_pass_in_sandcastle_if( + IS_MACOS or IS_WINDOWS, + "torch.profiler not enabled for mac/windows: https://github.com/pytorch/pytorch/pull/56124", + ) + def test_send_recv_nccl_torch_profiler(self): + profiler_ctx = torch.profiler.profile( + activities=[ + torch.profiler.ProfilerActivity.CPU, + torch.profiler.ProfilerActivity.CUDA, + ], + record_shapes=True, + ) + self._test_send_recv_nccl(profiler_ctx) + + # SEND RECV + def _test_send_recv(self, profiler_ctx): + rank = dist.get_rank() + send_size = rank + 1 + tensor = _build_tensor(send_size) + ctx = profiler_ctx if profiler_ctx is not None else nullcontext() + with ctx as prof: + for src in range(0, dist.get_world_size()): + if src == rank: + # Send mode + for dst in range(0, dist.get_world_size()): + if dst == rank: + continue + dist.send(tensor, dst) + else: + # Recv mode + recv_size = src + 1 + expected_tensor = _build_tensor(recv_size) + output_tensor = _build_tensor(recv_size, value=-1) + dist.recv(output_tensor, src) + self.assertEqual(output_tensor, 
expected_tensor) + + if profiler_ctx is not None: + backend = dist.get_backend() + if backend in SEND_RECV_PROFILING_SUPPORTED_BACKENDS: + for event_name in [f"{backend}:send", f"{backend}:recv"]: + events = get_profiling_event(event_name, prof) + # Each rank sends/recvs from all other ranks. + event_count = sum(e.count for e in events) + expected_event_count = dist.get_world_size() - 1 + self.assertEqual(event_count, expected_event_count) + # Event order is not deterministic, so simply assert their shape + # is found in the following list. + expected_shapes = [ + [[rank + 1] * 3] for rank in range(dist.get_world_size()) + ] + for event in events: + self.assertTrue(event.is_async) + self.assertTrue(event.input_shapes in expected_shapes) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl send/recv tested by test_send_recv_nccl" + ) + def test_send_recv(self): + self._test_send_recv(profiler_ctx=None) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "NCCL send/recv tested by test_send_recv_nccl" + ) + def test_send_recv_autograd_profiler(self): + autograd_profiler_ctx = _create_autograd_profiler() + self._test_send_recv(profiler_ctx=autograd_profiler_ctx) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "NCCL send/recv tested by test_send_recv_nccl" + ) + @skip_but_pass_in_sandcastle_if(IS_FBCODE, "Kineto in fbcode causes hang") + @skip_but_pass_in_sandcastle_if( + IS_MACOS or IS_WINDOWS, + "torch.profiler not enabled for mac/windows: https://github.com/pytorch/pytorch/pull/56124", + ) + def test_send_recv_torch_profiler(self): + torch_profiler_ctx = _create_torch_profiler() + return self._test_send_recv(profiler_ctx=torch_profiler_ctx) + + # SEND RECV ANY SOURCE + def _test_send_recv_any_source(self, profiler_ctx): + rank = dist.get_rank() + send_recv_size = 10 + tensor = _build_tensor(send_recv_size, value=rank) + recv_ranks = [] + irecv_ranks = [] + + ctx = profiler_ctx if profiler_ctx is not None else nullcontext() + with ctx as 
prof: + for dst in range(0, dist.get_world_size()): + if dst == rank: + # Recv mode + for dst in range(0, dist.get_world_size()): + if dst == rank: + continue + + for recv in ["recv", "irecv"]: + output_tensor = _build_tensor(send_recv_size, value=-1) + + if recv == "recv": + sender = dist.recv(output_tensor) + recv_ranks.append(sender) + elif recv == "irecv": + work = dist.irecv(output_tensor) + work.wait() + sender = work._source_rank() + irecv_ranks.append(sender) + + # Assert the scalar value "sender" that should be + # equal to the rank of the sender is equal to all + # values in the received tensor. + self.assertTrue(output_tensor.eq(sender).all()) + else: + # Send mode + dist.send(tensor, dst) # recv + dist.send(tensor, dst) # irecv + + if profiler_ctx is not None: + backend = dist.get_backend() + if backend in SEND_RECV_PROFILING_SUPPORTED_BACKENDS: + for event_name in [f"{backend}:send", f"{backend}:recvAnySource"]: + events = get_profiling_event(event_name, prof) + # Each rank sends/recvs from other rank twice. + self.assertEqual( + sum(event.count for event in events), + 2 * (dist.get_world_size() - 1), + ) + for event in events: + self.assertTrue(event.is_async) + self.assertEqual(event.input_shapes, [[send_recv_size] * 3]) + + # Each rank would have 2 * (world_size - 1) sends, verify that + # globally we receive the same amount on the other end. 
+ recv_ranks_tensor = torch.cat( + (torch.tensor(recv_ranks), torch.tensor(irecv_ranks)), 0 + ) + global_recv_ranks = [ + torch.empty_like(recv_ranks_tensor) + for _ in range(dist.get_world_size()) + ] + dist.all_gather(global_recv_ranks, recv_ranks_tensor) + global_recv_ranks_list = [] + for tensor in global_recv_ranks: + global_recv_ranks_list += tensor.tolist() + + from itertools import groupby + + global_recv_ranks_list.sort() + frequency = [ + len(list(group)) for key, group in groupby(global_recv_ranks_list) + ] + self.assertEqual(dist.get_world_size(), len(frequency)) + self.assertEqual( + [2 * (dist.get_world_size() - 1)] * dist.get_world_size(), frequency + ) + self._barrier() + + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["sendrecv anysource"], + f"{BACKEND} does not support send/recv from any source", + ) + def test_send_recv_any_source(self): + self._test_send_recv_any_source(profiler_ctx=None) + + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["sendrecv anysource"], + f"{BACKEND} does not support send/recv from any source", + ) + def test_send_recv_any_source_autograd_profiler(self): + autograd_profiler_ctx = _create_autograd_profiler() + self._test_send_recv_any_source(profiler_ctx=autograd_profiler_ctx) + + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["sendrecv anysource"], + f"{BACKEND} does not support send/recv from any source", + ) + @skip_but_pass_in_sandcastle_if(IS_FBCODE, "Kineto in fbcode code causes hang") + @skip_but_pass_in_sandcastle_if( + IS_MACOS or IS_WINDOWS, + "torch.profiler not enabled for mac/windows: https://github.com/pytorch/pytorch/pull/56124", + ) + def test_send_recv_any_source_torch_profiler(self): + torch_profiler_ctx = _create_torch_profiler() + return self._test_send_recv_any_source(profiler_ctx=torch_profiler_ctx) + + # SEND RECV WITH TAG + def _test_send_recv_with_tag(self, profiler_ctx): + rank = dist.get_rank() + world_size 
= dist.get_world_size() + send_recv_size = 10 + tensor = _build_tensor(send_recv_size, value=rank) + ctx = profiler_ctx if profiler_ctx is not None else nullcontext() + with ctx as prof: + for dst in range(0, world_size): + if dst == rank: + # Recv mode + for src in range(0, world_size): + if src == rank: + continue + output_tensor = _build_tensor(send_recv_size, value=-1) + dist.recv(output_tensor, src, tag=src) + self.assertTrue(output_tensor.eq(src).all()) + else: + # Send mode + dist.send(tensor, dst, tag=rank) + + if profiler_ctx is not None: + backend = dist.get_backend() + if backend in SEND_RECV_PROFILING_SUPPORTED_BACKENDS: + for event_name in [f"{backend}:send", f"{backend}:recv"]: + events = get_profiling_event(event_name, prof) + # Each rank sends/recvs from all other ranks + event_count = sum(e.count for e in events) + expected_event_count = dist.get_world_size() - 1 + self.assertEqual(event_count, expected_event_count) + for event in events: + self.assertTrue(event.is_async) + self.assertEqual(event.name, event_name) + self.assertEqual(event.input_shapes, [[send_recv_size] * 3]) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "NCCL send/recv tested by test_send_recv_nccl" + ) + def test_send_recv_with_tag(self): + self._test_send_recv_with_tag(profiler_ctx=None) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "NCCL send/recv tested by test_send_recv_nccl" + ) + def test_send_recv_with_tag_autograd_profiler(self): + autograd_profiler_ctx = _create_autograd_profiler() + return self._test_send_recv_with_tag(profiler_ctx=autograd_profiler_ctx) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "NCCL send/recv tested by test_send_recv_nccl" + ) + @skip_but_pass_in_sandcastle_if(IS_FBCODE, "Kineto in fbcode code causes hang") + @skip_but_pass_in_sandcastle_if( + IS_MACOS or IS_WINDOWS, + "torch.profiler not enabled for mac/windows: https://github.com/pytorch/pytorch/pull/56124", + ) + def 
test_send_recv_with_tag_torch_profiler(self): + torch_profiler_ctx = _create_torch_profiler() + return self._test_send_recv_with_tag(profiler_ctx=torch_profiler_ctx) + + # ISEND + def _test_isend(self, profiler_ctx): + rank = dist.get_rank() + world_size = dist.get_world_size() + ctx = profiler_ctx if profiler_ctx is not None else nullcontext() + with ctx as prof: + if rank == 0: + requests = [ + dist.isend(_build_tensor(dest, 10), dest) + for dest in range(1, world_size) + ] + for request in requests: + request.wait() + self.assertTrue(request.is_completed()) + else: + tensor = _build_tensor(rank, -1) + dist.recv(tensor, 0) + self.assertEqual(tensor, _build_tensor(rank, 10)) + + self._barrier() + + if profiler_ctx is not None: + backend = dist.get_backend() + if backend in SEND_RECV_PROFILING_SUPPORTED_BACKENDS: + expected_event_name = ( + f"{backend}:send" if rank == 0 else f"{backend}:recv" + ) + events = get_profiling_event(expected_event_name, prof) + event_count = sum(e.count for e in events) + expected_count = dist.get_world_size() - 1 if rank == 0 else 1 + self.assertEqual(expected_count, event_count) + # Event ordering is not guaranteed, so simply ensure the shapes are + # found in the following map. 
+ expected_shapes = { + r: [[r] * 3] for r in range(1, dist.get_world_size()) + } + for event in events: + self.assertTrue(event.is_async) + self.assertEqual(event.name, expected_event_name) + if rank == 0: + self.assertTrue( + event.input_shapes in expected_shapes.values() + ) + else: + self.assertEqual(event.input_shapes, expected_shapes[rank]) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support isend" + ) + def test_isend(self): + self._test_isend(profiler_ctx=None) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support isend" + ) + def test_isend_autograd_profiler(self): + autograd_profiler_ctx = _create_autograd_profiler() + self._test_isend(profiler_ctx=autograd_profiler_ctx) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support isend" + ) + @skip_but_pass_in_sandcastle_if(IS_FBCODE, "Kineto in fbcode code causes hang") + @skip_but_pass_in_sandcastle_if( + IS_MACOS or IS_WINDOWS, + "torch.profiler not enabled for mac/windows: https://github.com/pytorch/pytorch/pull/56124", + ) + def test_isend_torch_profiler(self): + torch_profiler_ctx = _create_torch_profiler() + self._test_isend(profiler_ctx=torch_profiler_ctx) + + # IRECV + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support irecv" + ) + def test_irecv(self): + rank = dist.get_rank() + world_size = dist.get_world_size() + + if rank == 0: + expected_tensors = [ + _build_tensor(src, -1) for src in range(1, world_size) + ] + requests = [ + dist.irecv(expected_tensors[src - 1], src) + for src in range(1, world_size) + ] + + for src in range(1, world_size): + requests[src - 1].wait() + self.assertTrue(requests[src - 1].is_completed()) + self.assertEqual(expected_tensors[src - 1], _build_tensor(src, 10)) + else: + tensor = _build_tensor(rank, 10) + dist.send(tensor, 0) + + self._barrier() + + # BROADCAST + def _test_broadcast_helper( + self, + group, + group_id, + rank, + cuda=False, + rank_to_GPU=None, + 
with_options=False, + ): + for dtype, value, requires_cuda in [ + (torch.float, -1e-10, False), + (torch.double, -1e-100, False), + (torch.half, -0.1, True), + (torch.int8, -2, False), + (torch.uint8, 129, False), + (torch.int, -1e5, False), + (torch.long, -1e15, False), + ]: + if requires_cuda and not cuda: + continue + for src in group: + expected_tensor = _build_tensor(src + 1, value, dtype) + if cuda: + expected_tensor = expected_tensor.cuda(rank_to_GPU[rank][0]) + if rank == src: + if with_options: + opts = dist.BroadcastOptions() + opts.rootTensor = 0 + opts.rootRank = src + self.call_dist_op( + ":broadcast", + True, + group_id.broadcast, + [expected_tensor], + opts, + ) + else: + self.call_dist_op( + ":broadcast", + False, + dist.broadcast, + expected_tensor, + src, + group_id, + ) + else: + tensor = _build_tensor(src + 1, -1, dtype) + if cuda: + tensor = tensor.cuda(rank_to_GPU[rank][0]) + if with_options: + opts = dist.BroadcastOptions() + opts.rootTensor = 0 + opts.rootRank = src + self.call_dist_op( + ":broadcast", True, group_id.broadcast, [tensor], opts + ) + else: + self.call_dist_op( + ":broadcast", + False, + dist.broadcast, + tensor, + src, + group_id, + ) + self.assertEqual(tensor.size(), expected_tensor.size()) + self.assertEqual( + tensor.ne(expected_tensor).max(), torch.tensor(False) + ) + + self._barrier() + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_broadcast(self): + group, group_id, rank = self._init_global_test() + self._test_broadcast_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "gloo" and BACKEND != "nccl", + "Only Gloo and Nccl backend supports CUDA allReduce", + ) + @skip_if_no_gpu + def test_broadcast_cuda(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + device_id = rank_to_GPU[rank][0] + torch.cuda.set_device(device_id) + self._test_broadcast_helper(group, 
group_id, rank, True, rank_to_GPU) + + @skip_if_small_worldsize + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_broadcast_group(self): + group, group_id, rank = self._init_group_test() + self._test_broadcast_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_broadcast_full_group(self): + group, group_id, rank = self._init_full_group_test() + self._test_broadcast_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", + "Only NCCL backend supports high priority stream", + ) + @skip_if_no_gpu + def test_nccl_high_priority_stream(self): + group, _, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + device_id = rank_to_GPU[rank][0] + torch.cuda.set_device(device_id) + + new_port = str(MASTER_PORT + 1) + os.environ["MASTER_PORT"] = new_port + gen_iterator = dist.rendezvous("env://", rank, dist.get_world_size()) + store, rank, size = next(gen_iterator) + store = dist.PrefixStore(new_port, store) + + opts = dist.ProcessGroupNCCL.Options() + opts.is_high_priority_stream = False + group_id = dist.ProcessGroupNCCL(store, rank, size, opts) + + self._test_broadcast_helper(group, group_id, rank, True, rank_to_GPU, True) + + # REDUCE + def _test_reduce_helper( + self, + group, + group_id, + rank, + op, + master_value, + worker_value, + expected_value, + cuda=False, + rank_to_GPU=None, + ): + for src in group: + tensor = _build_tensor(src + 1).fill_( + master_value if rank == src else worker_value + ) + if cuda: + tensor = tensor.cuda(rank_to_GPU[rank][0]) + self.call_dist_op( + ":reduce", + False, + dist.reduce, + tensor, + src, + op, + group_id, + tensor_shapes=[tensor.shape], + ) + if rank == src: + self.assertEqual(tensor, _build_tensor(src + 1, expected_value)) + + self._barrier() + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does 
not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["reduce"], + f"{BACKEND} does not support reduce", + ) + def test_reduce_sum(self): + group, group_id, rank = self._init_global_test() + self._test_reduce_helper( + group, + group_id, + rank, + dist.ReduceOp.SUM, + 2, + 10, + 2 + (10 * (len(group) - 1)), + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA reduce" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["reduce"], + f"{BACKEND} does not support reduce", + ) + @skip_if_no_gpu + def test_reduce_sum_cuda(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + device_id = rank_to_GPU[rank][0] + torch.cuda.set_device(device_id) + self._test_reduce_helper( + group, + group_id, + rank, + dist.ReduceOp.SUM, + 2, + 10, + 2 + 10 * (len(group) - 1), + True, + rank_to_GPU, + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["reduce"], + f"{BACKEND} does not support reduce", + ) + def test_reduce_product(self): + group, group_id, rank = self._init_global_test() + self._test_reduce_helper( + group, + group_id, + rank, + dist.ReduceOp.PRODUCT, + 2, + 10, + reduce(operator.mul, [10] * (len(group) - 1), 2), + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["reduce"], + f"{BACKEND} does not support reduce", + ) + def test_reduce_min(self): + group, group_id, rank = self._init_global_test() + self._test_reduce_helper( + group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1 + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND in 
DistTestCases.skip_collective["reduce"], + f"{BACKEND} does not support reduce", + ) + def test_reduce_max(self): + group, group_id, rank = self._init_global_test() + self._test_reduce_helper( + group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10 + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["reduce"], + f"{BACKEND} does not support reduce", + ) + @skip_if_small_worldsize + def test_reduce_group_sum(self): + group, group_id, rank = self._init_group_test() + self._test_reduce_helper( + group, + group_id, + rank, + dist.ReduceOp.SUM, + 2, + 10, + 2 + (10 * (len(group) - 1)), + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["reduce"], + f"{BACKEND} does not support reduce", + ) + @skip_if_small_worldsize + def test_reduce_group_product(self): + group, group_id, rank = self._init_group_test() + self._test_reduce_helper( + group, + group_id, + rank, + dist.ReduceOp.PRODUCT, + 2, + 10, + reduce(operator.mul, [10] * (len(group) - 1), 2), + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["reduce"], + f"{BACKEND} does not support reduce", + ) + @skip_if_small_worldsize + def test_reduce_group_min(self): + group, group_id, rank = self._init_group_test() + self._test_reduce_helper( + group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1 + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["reduce"], + f"{BACKEND} does not support reduce", + ) + @skip_if_small_worldsize + def test_reduce_group_max(self): + group, group_id, rank = self._init_group_test() + 
self._test_reduce_helper( + group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10 + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["reduce"], + f"{BACKEND} does not support reduce", + ) + def test_reduce_full_group_sum(self): + group, group_id, rank = self._init_full_group_test() + self._test_reduce_helper( + group, + group_id, + rank, + dist.ReduceOp.SUM, + 2, + 10, + 2 + (10 * (len(group) - 1)), + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["reduce"], + f"{BACKEND} does not support reduce", + ) + def test_reduce_full_group_product(self): + group, group_id, rank = self._init_full_group_test() + self._test_reduce_helper( + group, + group_id, + rank, + dist.ReduceOp.PRODUCT, + 2, + 10, + reduce(operator.mul, [10] * (len(group) - 1), 2), + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["reduce"], + f"{BACKEND} does not support reduce", + ) + def test_reduce_full_group_min(self): + group, group_id, rank = self._init_full_group_test() + self._test_reduce_helper( + group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1 + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["reduce"], + f"{BACKEND} does not support reduce", + ) + def test_reduce_full_group_max(self): + group, group_id, rank = self._init_full_group_test() + self._test_reduce_helper( + group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10 + ) + + # REDUCE TWICE + def _test_reduce_twice_helper( + self, + group, + group_id, + rank, + op, + master_value, + worker_value, + expected_value, + cuda=False, 
+ rank_to_GPU=None, + ): + for src in group: + tensors = [ + _build_tensor(src + 1).fill_( + master_value if rank == src else worker_value + ) + for i in range(2) + ] + if cuda: + for i in range(2): + tensors[i] = tensors[i].cuda(rank_to_GPU[rank][0]) + self.call_dist_op( + ":reduce", + False, + dist.reduce, + tensors[0], + src, + op, + group_id, + secondary_op_call=lambda: dist.reduce( + tensors[1], src, op, group_id + ), + tensor_shapes=[tensors[0].shape], + ) + if rank == src: + for tensor in tensors: + self.assertEqual(tensor, _build_tensor(src + 1, expected_value)) + + self._barrier() + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["reduce"], + f"{BACKEND} does not support reduce", + ) + def test_reduce_sum_twice(self): + group, group_id, rank = self._init_global_test() + self._test_reduce_twice_helper( + group, + group_id, + rank, + dist.ReduceOp.SUM, + 2, + 10, + 2 + (10 * (len(group) - 1)), + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA reduce" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["reduce"], + f"{BACKEND} does not support reduce", + ) + @skip_if_no_gpu + def test_reduce_sum_cuda_twice(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + device_id = rank_to_GPU[rank][0] + torch.cuda.set_device(device_id) + self._test_reduce_twice_helper( + group, + group_id, + rank, + dist.ReduceOp.SUM, + 2, + 10, + 2 + 10 * (len(group) - 1), + True, + rank_to_GPU, + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports reduce_scatter_v" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["reduce"], + f"{BACKEND} does not support reduce", + ) + @skip_if_no_gpu + def test_reduce_scatter_v_cuda(self): + self._barrier() + group, group_id, 
rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + device_id = rank_to_GPU[rank][0] + + input_split_sizes = [] + for src in group: + input_split_sizes.append(src + 1) + start_len = sum(input_split_sizes[:rank]) + end_len = start_len + input_split_sizes[rank] + sum_len = sum(input_split_sizes) + master_value = 2 + worker_value = 10 + + for async_val in [True, False]: + tensor = _build_tensor(sum_len, worker_value, device_id=device_id) + tensor[start_len:end_len].fill_(master_value) + out_tensor = ( + torch.empty( + input_split_sizes[rank], sum_len, sum_len, dtype=torch.float + ) + .fill_(-1) + .cuda(device_id) + ) + + req = dist.reduce_scatter( + out_tensor, + list(torch.split(tensor, input_split_sizes)), + dist.ReduceOp.SUM, + group_id, + async_val, + ) + if async_val: + req.wait() + + expected_value = 2 + (10 * (len(group) - 1)) + expected_tensor = torch.empty( + input_split_sizes[rank], sum_len, sum_len, dtype=torch.float + ) + expected_tensor = expected_tensor.fill_(expected_value).cuda(device_id) + + self.assertEqual(out_tensor, expected_tensor) + self._barrier() + + # Test reduce_scatter_tensor accepting single tensor as input + def _reduce_scatter_tensor_helper( + self, tensor_out, tensor_in, group_id, rank, cuda=True, rank_to_GPU=None + ): + if cuda: + tensor_in = tensor_in.cuda(rank_to_GPU[rank][0]) + tensor_out = tensor_out.cuda(rank_to_GPU[rank][0]) + tensor_shapes = [tensor_out.shape] + self.call_dist_op( + ":reduce_scatter_tensor", + False, + dist.reduce_scatter_tensor, + tensor_out, + tensor_in, + dist.ReduceOp.SUM, + group_id, + False, + expect_event=False, + tensor_shapes=tensor_shapes, + ) + return tensor_out + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA reduce_scatter_tensor" + ) + @skip_if_no_gpu + def test_reduce_scatter_tensor_cuda(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + 
size = 2 + tensor_out = torch.zeros(size, dtype=torch.int64) + + # Concatenated input + tensor_in = torch.arange(len(group) * size) + tensor_out = self._reduce_scatter_tensor_helper( + tensor_out, tensor_in, group_id, rank, True, rank_to_GPU + ) + # Check result + expected_tensor = torch.arange(rank * size, (rank + 1) * size) * len(group) + self.assertEqual(tensor_out, expected_tensor) + self._barrier() + + # Stacked input + tensor_in = torch.reshape(tensor_in, (len(group), size)) + tensor_out = self._reduce_scatter_tensor_helper( + tensor_out, tensor_in, group_id, rank, True, rank_to_GPU + ) + # Check result + # Should be the same as the result in concatenated case + self.assertEqual(tensor_out, expected_tensor) + self._barrier() + + def call_dist_op( + self, + profiling_title_postfix, + is_async, + op, + *args, + expect_event=True, + secondary_op_call=None, + profile_cuda=False, + tensor_shapes=None, + **kwargs, + ): + op_calls = [lambda: op(*args, **kwargs)] + if secondary_op_call is not None: + op_calls.append(secondary_op_call) + + autograd_profiler_ctx = torch.autograd.profiler.profile( + use_cuda=profile_cuda, record_shapes=True + ) + + # TODO: move this test to use torch.profiler once kineto issues are + # fixed internally. + with autograd_profiler_ctx as prof: + works = [op_call() for op_call in op_calls] + if is_async: + for work in works: + work.wait() + + if expect_event and dist.get_backend() in PROFILING_SUPPORTED_BACKENDS: + # We are only interested in the backend's implementation not the dispatcher wrapper. 
+ events = get_profiling_event( + dist.get_backend() + profiling_title_postfix, autograd_profiler_ctx + ) + # DETAIL debug mode can use a pg wrapper that issues more collectives + # under the hood + if dist.get_debug_level() != dist.DebugLevel.DETAIL: + self.assertEqual(len(events), len(op_calls)) + for e in events: + self.assertTrue(e.is_async) + self.assertEqual(e.count, 1) + self.assertGreaterEqual(e.cpu_time, 0) + # Verify tensor shapes if given + # DETAIL debug mode can use a pg wrapper that issues more collectives + # under the hood + if ( + tensor_shapes is not None + and dist.get_debug_level() != dist.DebugLevel.DETAIL + ): + self.assertEqual( + e.input_shapes, + tensor_shapes, + f"event shape: {e.input_shapes} vs tensor {tensor_shapes}", + ) + + # ALL REDUCE + def _test_all_reduce_helper( + self, + group, + group_id, + rank, + op, + master_value, + worker_value, + expected_value, + cuda=False, + rank_to_GPU=None, + dtype=torch.float, + async_op=False, + ): + for src in group: + curr_value = master_value if rank == src else worker_value + + tensor = _build_tensor(src + 1, dtype=dtype).fill_(curr_value) + if cuda: + tensor = tensor.cuda(rank_to_GPU[rank][0]) + if tensor.dtype == torch.complex64: + tensor_shapes = [torch.view_as_real(tensor).shape] + else: + tensor_shapes = [tensor.shape] + self.call_dist_op( + ":all_reduce", + async_op, + dist.all_reduce, + tensor, + op, + group_id, + async_op=async_op, + tensor_shapes=tensor_shapes, + ) + # Currently, only Gloo backend has profiling tested with CUDA enabled. + # Only run cuda profiling test for one rank to speed up since + # running with different src_rank does not affect the correctness. 
+ if ( + src == 0 + and cuda + and dist.get_backend() in CUDA_PROFILING_SUPPORTED_BACKENDS + ): + self.call_dist_op( + ":all_reduce", + async_op, + dist.all_reduce, + tensor, + op, + group_id, + async_op=async_op, + profile_cuda=True, + tensor_shapes=tensor_shapes, + ) + + self._barrier() + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_reduce_sum(self): + group, group_id, rank = self._init_global_test() + self._test_all_reduce_helper( + group, + group_id, + rank, + dist.ReduceOp.SUM, + 2, + 10, + 2 + (10 * (len(group) - 1)), + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_reduce_sum_async(self): + group, group_id, rank = self._init_global_test() + self._test_all_reduce_helper( + group, + group_id, + rank, + dist.ReduceOp.SUM, + 2, + 10, + 2 + (10 * (len(group) - 1)), + async_op=True, + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "gloo" and BACKEND != "nccl", + "Only Gloo and NCCL backends will have CUDA allReduce tested", + ) + @skip_if_no_gpu + def test_all_reduce_sum_cuda(self): + torch.cuda.set_device(self.rank) + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_all_reduce_helper( + group, + group_id, + rank, + dist.ReduceOp.SUM, + 2, + 10, + 2 + (10 * (len(group) - 1)), + True, + rank_to_GPU, + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "gloo" and BACKEND != "nccl", + "Only Gloo and NCCL backends will have CUDA allReduce tested", + ) + @skip_if_no_gpu + def test_all_reduce_sum_cuda_async(self): + torch.cuda.set_device(self.rank) + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_all_reduce_helper( + group, + group_id, + rank, + dist.ReduceOp.SUM, + 2, + 10, + 2 + (10 * (len(group) - 1)), + True, + rank_to_GPU, + async_op=True, + ) + + 
@skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_reduce_sum_complex(self): + group, group_id, rank = self._init_global_test() + self._test_all_reduce_helper( + group, + group_id, + rank, + dist.ReduceOp.SUM, + complex(2, 3), + complex(10, 11), + complex(2, 3) + (complex(10, 11) * (len(group) - 1)), + dtype=torch.cfloat, + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_reduce_complex_unsupported_ops(self): + unsupported_ops = [ + dist.ReduceOp.MAX, + dist.ReduceOp.MIN, + dist.ReduceOp.PRODUCT, + dist.ReduceOp.BAND, + dist.ReduceOp.BOR, + dist.ReduceOp.BXOR, + ] + group, group_id, rank = self._init_global_test() + for unsupported_op in unsupported_ops: + with self.assertRaisesRegex( + ValueError, "all_reduce does not support" + ): + dist.all_reduce( + _build_tensor(1, dtype=torch.cfloat), unsupported_op, group_id + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "gloo" and BACKEND != "nccl", + "Only Gloo and NCCL backends will have CUDA allReduce tested", + ) + @skip_if_no_gpu + def test_all_reduce_sum_cuda_complex(self): + torch.cuda.set_device(self.rank) + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_all_reduce_helper( + group, + group_id, + rank, + dist.ReduceOp.SUM, + complex(2, 3), + complex(10, 11), + complex(2, 3) + (complex(10, 11) * (len(group) - 1)), + True, + rank_to_GPU, + dtype=torch.cfloat, + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_reduce_product(self): + group, group_id, rank = self._init_global_test() + self._test_all_reduce_helper( + group, + group_id, + rank, + dist.ReduceOp.PRODUCT, + 2, + 10, + reduce(operator.mul, [10] * (len(group) - 1), 2), + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def 
test_all_reduce_min(self): + group, group_id, rank = self._init_global_test() + self._test_all_reduce_helper( + group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1 + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_reduce_max(self): + group, group_id, rank = self._init_global_test() + self._test_all_reduce_helper( + group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10 + ) + + @skip_if_small_worldsize + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_reduce_group_sum(self): + group, group_id, rank = self._init_group_test() + self._test_all_reduce_helper( + group, + group_id, + rank, + dist.ReduceOp.SUM, + 2, + 10, + 2 + (10 * (len(group) - 1)), + ) + + @skip_if_small_worldsize + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_reduce_group_product(self): + group, group_id, rank = self._init_group_test() + self._test_all_reduce_helper( + group, + group_id, + rank, + dist.ReduceOp.PRODUCT, + 2, + 10, + reduce(operator.mul, [10] * (len(group) - 1), 2), + ) + + @skip_if_small_worldsize + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_reduce_group_min(self): + group, group_id, rank = self._init_group_test() + self._test_all_reduce_helper( + group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1 + ) + + @skip_if_small_worldsize + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_reduce_group_max(self): + group, group_id, rank = self._init_group_test() + self._test_all_reduce_helper( + group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10 + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_reduce_full_group_sum(self): + group, group_id, rank = self._init_full_group_test() + self._test_all_reduce_helper( + 
group, + group_id, + rank, + dist.ReduceOp.SUM, + 2, + 10, + 2 + (10 * (len(group) - 1)), + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_reduce_full_group_product(self): + group, group_id, rank = self._init_full_group_test() + self._test_all_reduce_helper( + group, + group_id, + rank, + dist.ReduceOp.PRODUCT, + 2, + 10, + reduce(operator.mul, [10] * (len(group) - 1), 2), + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_reduce_full_group_min(self): + group, group_id, rank = self._init_full_group_test() + self._test_all_reduce_helper( + group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1 + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_reduce_full_group_max(self): + group, group_id, rank = self._init_full_group_test() + self._test_all_reduce_helper( + group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10 + ) + + # SPARSE ALL REDUCE + def _test_sparse_all_reduce_sum(self, fn): + group, group_id, rank = self._init_global_test() + + tests = simple_sparse_reduce_tests( + rank, dist.get_world_size(), num_inputs=1 + ) + for (inputs, outputs) in tests: + tensors = [fn(input) for input in inputs] + dist.all_reduce(tensors[0], dist.ReduceOp.SUM, group_id) + self.assertEqual(tensors[0], outputs[0]) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "gloo", "Only Gloo backend support sparse all reduce" + ) + def test_sparse_all_reduce_sum(self): + self._test_sparse_all_reduce_sum(lambda t: t) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "gloo", "Only Gloo backend support sparse all reduce" + ) + @skip_if_no_gpu + def test_sparse_all_reduce_sum_cuda(self): + self._test_sparse_all_reduce_sum(lambda t: t.clone().cuda()) + + # ALL REDUCE - COALESCED + @staticmethod + def _all_reduce_coalesced_sum_test_cases(group_size): + return ( + [2, 3, complex(2, 3)], + [10, 11, 
complex(10, 11)], + [ + 2 + 10 * (group_size - 1), + 3 + 11 * (group_size - 1), + complex(2, 3) + complex(10, 11) * (group_size - 1), + ], + [torch.float, torch.float, torch.cfloat], + ) + + @staticmethod + def _all_reduce_coalesced_product_test_cases(group_size): + return ( + [1, 2], + [3, 4], + [1 * 3 ** (group_size - 1), 2 * 4 ** (group_size - 1)], + [torch.float, torch.float], + ) + + @staticmethod + def _all_reduce_coalesced_min_test_cases(group_size): + return ( + [1, 4], + [2, 3], + [1, 3], + [torch.float, torch.float], + ) + + @staticmethod + def _all_reduce_coalesced_max_test_cases(group_size): + return ( + [1, 4], + [2, 3], + [2, 4], + [torch.float, torch.float], + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_reduce_coalesced_max_complex_unsupported(self): + group, group_id, rank = self._init_global_test() + with self.assertRaisesRegex(ValueError, "all_reduce does not support"): + dist.all_reduce_coalesced( + [_build_tensor(1, dtype=torch.cfloat)], dist.ReduceOp.MAX, group_id + ) + + def _test_all_reduce_coalesced_helper( + self, + group, + group_id, + rank, + op, + cuda=False, + rank_to_GPU=None, + ): + test_case_func = { + dist.ReduceOp.SUM: self._all_reduce_coalesced_sum_test_cases, + dist.ReduceOp.PRODUCT: self._all_reduce_coalesced_product_test_cases, + dist.ReduceOp.MIN: self._all_reduce_coalesced_min_test_cases, + dist.ReduceOp.MAX: self._all_reduce_coalesced_max_test_cases, + }[op] + + master_values, worker_values, expected_values, dtypes = test_case_func( + len(group) + ) + + for src in group: + curr_values = master_values if rank == src else worker_values + tensors = [ + _build_tensor(src + 1, val, dtype=dtype) + for dtype, val in zip(dtypes, curr_values) + ] + if cuda: + tensors = [t.cuda(rank_to_GPU[rank][0]) for t in tensors] + tensor_shapes = [] + for tensor in tensors: + if tensor.dtype == torch.complex64: + tensor_shapes.append(torch.view_as_real(tensor).shape) + else: + 
tensor_shapes.append(tensor.shape) + self.call_dist_op( + ":all_reduce", + False, + dist.all_reduce_coalesced, + tensors, + op, + group_id, + tensor_shapes=tensor_shapes, + ) + expected_tensors = [ + _build_tensor(src + 1, expected_value, dtype=dtype) + for dtype, expected_value in zip(dtypes, expected_values) + ] + self.assertEqual(tensors, expected_tensors) + + self._barrier() + + @require_backend_is_available({"gloo"}) + def test_all_reduce_coalesced_sum(self): + group, group_id, rank = self._init_global_test() + self._test_all_reduce_coalesced_helper( + group, + group_id, + rank, + dist.ReduceOp.SUM, + cuda=False, + rank_to_GPU=None, + ) + + @require_backend_is_available({"gloo"}) + def test_all_reduce_coalesced_product(self): + group, group_id, rank = self._init_global_test() + self._test_all_reduce_coalesced_helper( + group, + group_id, + rank, + dist.ReduceOp.PRODUCT, + cuda=False, + rank_to_GPU=None, + ) + + @require_backend_is_available({"gloo"}) + def test_all_reduce_coalesced_min(self): + group, group_id, rank = self._init_global_test() + self._test_all_reduce_coalesced_helper( + group, + group_id, + rank, + dist.ReduceOp.MIN, + cuda=False, + rank_to_GPU=None, + ) + + @require_backend_is_available({"gloo"}) + def test_all_reduce_coalesced_max(self): + group, group_id, rank = self._init_global_test() + self._test_all_reduce_coalesced_helper( + group, group_id, rank, dist.ReduceOp.MAX, cuda=False, rank_to_GPU=None + ) + + @skip_if_small_worldsize + @require_backend_is_available({"gloo"}) + def test_all_reduce_coalesced_group_sum(self): + group, group_id, rank = self._init_group_test() + self._test_all_reduce_coalesced_helper( + group, group_id, rank, dist.ReduceOp.SUM, cuda=False, rank_to_GPU=None + ) + + @skip_if_small_worldsize + @require_backend_is_available({"gloo"}) + def test_all_reduce_coalesced_group_product(self): + group, group_id, rank = self._init_group_test() + self._test_all_reduce_coalesced_helper( + group, + group_id, + rank, + 
dist.ReduceOp.PRODUCT, + cuda=False, + rank_to_GPU=None, + ) + + @skip_if_small_worldsize + @require_backend_is_available({"gloo"}) + def test_all_reduce_coalesced_group_min(self): + group, group_id, rank = self._init_group_test() + self._test_all_reduce_coalesced_helper( + group, group_id, rank, dist.ReduceOp.MIN, cuda=False, rank_to_GPU=None + ) + + @skip_if_small_worldsize + @require_backend_is_available({"gloo"}) + def test_all_reduce_coalesced_group_max(self): + group, group_id, rank = self._init_group_test() + self._test_all_reduce_coalesced_helper( + group, group_id, rank, dist.ReduceOp.MAX, cuda=False, rank_to_GPU=None + ) + + @require_backend_is_available({"gloo"}) + def test_all_reduce_coalesced_full_group_sum(self): + group, group_id, rank = self._init_full_group_test() + self._test_all_reduce_coalesced_helper( + group, group_id, rank, dist.ReduceOp.SUM, cuda=False, rank_to_GPU=None + ) + + @require_backend_is_available({"gloo"}) + def test_all_reduce_coalesced_full_group_product(self): + group, group_id, rank = self._init_full_group_test() + self._test_all_reduce_coalesced_helper( + group, + group_id, + rank, + dist.ReduceOp.PRODUCT, + cuda=False, + rank_to_GPU=None, + ) + + @require_backend_is_available({"gloo"}) + def test_all_reduce_coalesced_full_group_min(self): + group, group_id, rank = self._init_full_group_test() + self._test_all_reduce_coalesced_helper( + group, + group_id, + rank, + dist.ReduceOp.MIN, + cuda=False, + rank_to_GPU=None, + ) + + @require_backend_is_available({"gloo"}) + def test_all_reduce_coalesced_full_group_max(self): + group, group_id, rank = self._init_full_group_test() + self._test_all_reduce_coalesced_helper( + group, group_id, rank, dist.ReduceOp.MAX, cuda=False, rank_to_GPU=None + ) + + # SCATTER + def _test_scatter_helper( + self, group, group_id, rank, cuda=False, rank_to_GPU=None, dtype=torch.float + ): + for dest in group: + tensor = _build_tensor(dest + 1, -1, dtype=dtype) + expected_tensor = _build_tensor(dest + 1, 
rank, dtype=dtype) + tensors = ( + [_build_tensor(dest + 1, i, dtype=dtype) for i in group] + if rank == dest + else [] + ) + if cuda: + tensor = tensor.cuda(rank_to_GPU[rank][0]) + tensors = [t.cuda(rank_to_GPU[rank][0]) for t in tensors] + if dtype == torch.complex64: + tensor_shapes = [torch.view_as_real(t).shape for t in tensors] + else: + tensor_shapes = [t.shape for t in tensors] + self.call_dist_op( + ":scatter", + False, + dist.scatter, + tensor, + src=dest, + scatter_list=tensors, + group=group_id, + expect_event=False, + tensor_shapes=tensor_shapes, + ) + self.assertEqual(tensor, expected_tensor) + + self._barrier() + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND == "ucc", "CPU tensor ops not supported by UCP TL" + ) + def test_scatter_checks(self): + group, group_id, rank = self._init_global_test() + one = torch.ones([1]) + + # Specify scatter_list argument only on source rank. + output = one.clone() * -1 + if rank == 0: + scatter_list = [one.clone() * i for i in group] + dist.scatter(output, src=0, scatter_list=scatter_list) + else: + dist.scatter(output, src=0) + self.assertEqual(output, one * rank) + + # Don't specify src argument. 
+ output = one.clone() * -1 + if rank == 0: + scatter_list = [one.clone() * i for i in group] + dist.scatter(output, scatter_list=scatter_list) + else: + dist.scatter(output) + self.assertEqual(output, one * rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND == "ucc", "CPU tensor ops not supported by UCP TL" + ) + def test_scatter(self): + group, group_id, rank = self._init_global_test() + self._test_scatter_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA gather" + ) + @skip_if_no_gpu + def test_scatter_cuda(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_scatter_helper(group, group_id, rank, True, rank_to_GPU) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND == "ucc", "CPU tensor ops not supported by UCP TL" + ) + def test_scatter_complex(self): + group, group_id, rank = self._init_global_test() + self._test_scatter_helper(group, group_id, rank, dtype=torch.cfloat) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA gather" + ) + @skip_if_no_gpu + def test_scatter_cuda_complex(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_scatter_helper( + group, group_id, rank, True, rank_to_GPU, dtype=torch.cfloat + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND == "ucc", "CPU tensor ops not supported by UCP TL" + ) + @skip_if_small_worldsize + def test_scatter_group(self): + group, group_id, rank = self._init_group_test() + self._test_scatter_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + 
BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND == "ucc", "CPU tensor ops not supported by UCP TL" + ) + def test_scatter_full_group(self): + group, group_id, rank = self._init_full_group_test() + self._test_scatter_helper(group, group_id, rank) + + # GATHER + def _test_gather_helper( + self, group, group_id, rank, cuda=False, rank_to_GPU=None + ): + for dest in group: + tensor = _build_tensor(dest + 1, rank) + tensors = ( + [_build_tensor(dest + 1, -1) for i in group] if rank == dest else [] + ) + if cuda: + tensor = tensor.cuda(rank_to_GPU[rank][0]) + tensors = [t.cuda(rank_to_GPU[rank][0]) for t in tensors] + self.call_dist_op( + ":gather", + False, + dist.gather, + tensor, + dst=dest, + gather_list=tensors, + group=group_id, + expect_event=False, + tensor_shapes=[tensors[0].shape] if len(tensors) > 0 else None, + ) + if rank == dest: + expected_tensors = [_build_tensor(dest + 1, i) for i in group] + for t1, t2 in zip(tensors, expected_tensors): + self.assertEqual(t1, t2) + + self._barrier() + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND == "ucc", "CPU tensor ops not supported by UCP TL" + ) + def test_gather_checks(self): + group, group_id, rank = self._init_global_test() + one = torch.ones([1]) + + # Specify gather_list argument only on destination rank. + if rank == 0: + gather_list = [one.clone() for _ in group] + dist.gather(one * rank, dst=0, gather_list=gather_list) + for i in group: + self.assertEqual(gather_list[i], one * i) + else: + dist.gather(one * rank, dst=0) + + # Don't specify dst argument. 
+ if rank == 0: + gather_list = [one.clone() for _ in group] + dist.gather(one * rank, gather_list=gather_list) + for i in group: + self.assertEqual(gather_list[i], one * i) + else: + dist.gather(one * rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND == "ucc", "CPU tensor ops not supported by UCP TL" + ) + def test_gather(self): + group, group_id, rank = self._init_global_test() + self._test_gather_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA gather" + ) + @skip_if_no_gpu + def test_gather_cuda(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_gather_helper(group, group_id, rank, True, rank_to_GPU) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND == "ucc", "CPU tensor ops not supported by UCP TL" + ) + @skip_if_small_worldsize + def test_gather_group(self): + group, group_id, rank = self._init_group_test() + self._test_gather_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + @skip_but_pass_in_sandcastle_if( + BACKEND == "ucc", "CPU tensor ops not supported by UCP TL" + ) + def test_gather_full_group(self): + group, group_id, rank = self._init_full_group_test() + self._test_gather_helper(group, group_id, rank) + + # ALL GATHER + def _test_all_gather_helper( + self, group, group_id, rank, cuda=False, rank_to_GPU=None, dtype=torch.float + ): + for dest in group: + tensor = _build_tensor(dest + 1, rank, dtype=dtype) + tensors = [_build_tensor(dest + 1, -1, dtype=dtype) for i in group] + allgather = dist.all_gather + if cuda: + tensor = tensor.cuda(rank_to_GPU[rank][0]) + tensors = [t.cuda(rank_to_GPU[rank][0]) for t in tensors] + if 
tensors[0].dtype == torch.complex64: + tensor_shapes = [torch.view_as_real(tensors[0]).shape] + else: + tensor_shapes = [tensors[0].shape] + self.call_dist_op( + ":all_gather", + False, + allgather, + tensors, + tensor, + group_id, + False, + tensor_shapes=tensor_shapes, + ) + + expected_tensors = [ + _build_tensor(dest + 1, i, dtype=dtype) for i in group + ] + for t1, t2 in zip(tensors, expected_tensors): + self.assertEqual(t1, t2) + + self._barrier() + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_gather(self): + group, group_id, rank = self._init_global_test() + self._test_all_gather_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA all gather" + ) + @skip_if_no_gpu + def test_all_gather_cuda(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_all_gather_helper(group, group_id, rank, True, rank_to_GPU) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_gather_complex(self): + group, group_id, rank = self._init_global_test() + self._test_all_gather_helper(group, group_id, rank, dtype=torch.cfloat) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA all gather" + ) + @skip_if_no_gpu + def test_all_gather_cuda_complex(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_all_gather_helper( + group, group_id, rank, True, rank_to_GPU, dtype=torch.cfloat + ) + + @skip_if_small_worldsize + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl does not support CPU tensors" + ) + def test_all_gather_group(self): + group, group_id, rank = self._init_group_test() + self._test_all_gather_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "Nccl 
does not support CPU tensors" + ) + def test_all_gather_full_group(self): + group, group_id, rank = self._init_full_group_test() + self._test_all_gather_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports all_gather_v" + ) + @skip_if_no_gpu + def test_all_gather_v_cuda(self): + self._barrier() + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + device_id = rank_to_GPU[rank][0] + + output_split_sizes = [] + for dst in group: + output_split_sizes.append(dst + 1) + sum_len = sum(output_split_sizes) + value = 2 + + for async_val in [True, False]: + tensor = ( + torch.empty( + output_split_sizes[rank], sum_len, sum_len, dtype=torch.float + ) + .fill_(value) + .cuda(device_id) + ) + out_tensor = _build_tensor(sum_len, -1, device_id=device_id) + + req = dist.all_gather( + list(torch.split(out_tensor, output_split_sizes)), + tensor, + group_id, + async_val, + ) + if async_val: + req.wait() + + expected_value = value + expected_tensor = _build_tensor( + sum_len, expected_value, device_id=device_id + ) + + self.assertEqual(out_tensor, expected_tensor) + self._barrier() + + # Test all_gather accepting single tensor as output + def _all_gather_into_tensor_helper( + self, tensor_out, tensor_in, group_id, rank, cuda=True, rank_to_GPU=None + ): + if cuda: + tensor_in = tensor_in.cuda(rank_to_GPU[rank][0]) + tensor_out = tensor_out.cuda(rank_to_GPU[rank][0]) + if tensor_out.dtype == torch.complex64: + tensor_shapes = [torch.view_as_real(tensor_in).shape] + else: + tensor_shapes = [tensor_in.shape] + self.call_dist_op( + ":all_gather_into_tensor", + False, + dist.all_gather_into_tensor, + tensor_out, + tensor_in, + group_id, + False, + expect_event=False, + tensor_shapes=tensor_shapes, + ) + return tensor_out + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA all_gather_into_tensor" + ) + @skip_if_no_gpu + def 
test_all_gather_into_cat_tensor_cuda(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + size = 2 + tensor_in = torch.ones([size, size]) * rank + # Concatenated output + tensor_out = torch.ones([len(group) * size, size]) * (-1) + tensor_out = self._all_gather_into_tensor_helper( + tensor_out, tensor_in, group_id, rank, True, rank_to_GPU + ) + + # Check result + # Concatenate all blocks into a bigger tensor + expected_tensor = torch.cat([torch.ones([size, size]) * i for i in group]) + self.assertEqual(tensor_out, expected_tensor) + self._barrier() + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA all_gather_into_tensor" + ) + @skip_if_no_gpu + def test_all_gather_into_stack_tensor_cuda(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + size = 2 + tensor_in = torch.ones([size, size]) * rank + # Stacked output + tensor_out = torch.ones([len(group), size, size]) * (-1) + tensor_out = self._all_gather_into_tensor_helper( + tensor_out, tensor_in, group_id, rank, True, rank_to_GPU + ) + + # Check result + # Stack all blocks into a bigger tensor + expected_tensor = torch.stack([torch.ones([size, size]) * i for i in group]) + self.assertEqual(tensor_out, expected_tensor) + self._barrier() + + def _run_all_gather_coalesced_and_verify( + self, output_tensor_lists, input_tensors, expected_tensors, group_id + ): + """ + Helper that runs all_gather_coalesced and returns true if output + matches expectations. 
+ """ + tensor_shapes = [] + for input_tensor in input_tensors: + if input_tensor.dtype == torch.complex64: + tensor_shapes.append(torch.view_as_real(input_tensor).shape) + else: + tensor_shapes.append(input_tensor.shape) + self.call_dist_op( + ":all_gather", + False, + dist.all_gather_coalesced, + output_tensor_lists, + input_tensors, + group_id, + tensor_shapes=tensor_shapes, + ) + + for l1, l2 in zip(output_tensor_lists, expected_tensors): + for t1, t2 in zip(l1, l2): + if not torch.equal(t1, t2): + return False + return True + + def _test_all_gather_coalesced_helper( + self, group, group_id, rank, dtype=torch.float + ): + # TODO: Instead we should probably go through _rank_not_in_group + # mechanism to disable sending tensors + if group_id is not None: + for test_case_id in range(2, 5): + # Make sure we create tensors of incompatible sizes, e.g. + # [1], [2x2], [3x3x3] ... to be sent in one batch + input_tensors = [ + _build_multidim_tensor( + tensor_id, tensor_id, rank + tensor_id, dtype=dtype + ) + for tensor_id in range(1, test_case_id) + ] + output_tensor_lists = [ + [ + _build_multidim_tensor( + tensor_id, tensor_id, -1, dtype=dtype + ) + for tensor_id in range(1, test_case_id) + ] + for _ in group + ] + expected_tensors = [ + [ + _build_multidim_tensor( + tensor_id, tensor_id, rank_iter + tensor_id, dtype=dtype + ) + for tensor_id in range(1, test_case_id) + ] + for rank_iter in group + ] + assert self._run_all_gather_coalesced_and_verify( + output_tensor_lists, input_tensors, expected_tensors, group_id + ), "output tensors do not match expected outputs" + + self._barrier() + + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["allgather_coalesced"], + f"{BACKEND} does not support all_gather_coalesced", + ) + def test_all_gather_coalesced_simple(self): + group, group_id, rank = self._init_global_test() + self._test_all_gather_coalesced_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND in 
DistTestCases.skip_collective["allgather_coalesced"], + f"{BACKEND} does not support all_gather_coalesced", + ) + def test_all_gather_coalesced_complex(self): + group, group_id, rank = self._init_global_test() + self._test_all_gather_coalesced_helper( + group, group_id, rank, dtype=torch.cfloat + ) + + @skip_if_small_worldsize + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["allgather_coalesced"], + f"{BACKEND} does not support all_gather_coalesced", + ) + def test_all_gather_coalesced_group(self): + group, group_id, rank = self._init_group_test() + self._test_all_gather_coalesced_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["allgather_coalesced"], + f"{BACKEND} does not support all_gather_coalesced", + ) + def test_all_gather_coalesced_full_group(self): + group, group_id, rank = self._init_full_group_test() + self._test_all_gather_coalesced_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND in DistTestCases.skip_collective["allgather_coalesced"], + f"{BACKEND} does not support all_gather_coalesced", + ) + def test_all_gather_coalesced_with_empty(self): + group, group_id, rank = self._init_global_test() + input_tensors = [ + rank * torch.ones([2, 2]), + torch.ones([0]), + (rank + 1) * torch.ones([3, 3]), + torch.ones([0]), + torch.ones([0]), + ] + output_tensors_lists = [ + [ + -1 * torch.ones([2, 2]), + -1 * torch.ones([0]), + -1 * torch.ones([3, 3]), + -1 * torch.ones([0]), + -1 * torch.ones([0]), + ] + for _ in group + ] + expected_tensors = [ + [ + r * torch.ones([2, 2]), + torch.ones([0]), + (r + 1) * torch.ones([3, 3]), + torch.ones([0]), + torch.ones([0]), + ] + for r in group + ] + assert self._run_all_gather_coalesced_and_verify( + output_tensors_lists, input_tensors, expected_tensors, group_id + ) + self._barrier() + + # AllToAll + def _test_all_to_all_single_equal_split_helper( + self, group, group_id, rank, cuda=False, 
rank_to_GPU=None, dtype=torch.float + ): + if group_id is not None: + size = len(group) + in_tensor = torch.ones([size, size], dtype=dtype) * rank + expected_tensor = torch.cat( + [torch.ones([1, size], dtype=dtype) * i for i in group] + ) + out_tensor = torch.ones([size, size], dtype=dtype) * -1 + if cuda: + in_tensor = in_tensor.cuda(rank_to_GPU[rank][0]) + expected_tensor = expected_tensor.cuda(rank_to_GPU[rank][0]) + out_tensor = out_tensor.cuda(rank_to_GPU[rank][0]) + if dtype == torch.complex64: + tensor_shapes = [torch.view_as_real(in_tensor).shape] + else: + tensor_shapes = [in_tensor.shape] + self.call_dist_op( + ":all_to_all", + False, + dist.all_to_all_single, + out_tensor, + in_tensor, + group=group_id, + tensor_shapes=tensor_shapes, + ) + self.assertEqual(out_tensor, expected_tensor) + self._barrier() + + def _test_all_to_all_single_unequal_split_helper( + self, group, group_id, rank, cuda=False, rank_to_GPU=None, dtype=torch.float + ): + if group_id is not None: + size = len(group) + in_splits = [i + 1 for i in group] + out_splits = [rank + 1 for _ in group] + in_tensor = torch.ones([sum(in_splits), size], dtype=dtype) * rank + out_tensor = torch.ones([(rank + 1) * size, size], dtype=dtype) + expected_tensor = torch.cat( + [torch.ones([rank + 1, size], dtype=dtype) * i for i in group] + ) + if cuda: + in_tensor = in_tensor.cuda(rank_to_GPU[rank][0]) + expected_tensor = expected_tensor.cuda(rank_to_GPU[rank][0]) + out_tensor = out_tensor.cuda(rank_to_GPU[rank][0]) + dist.all_to_all_single( + out_tensor, in_tensor, out_splits, in_splits, group=group_id + ) + self.assertEqual(out_tensor, expected_tensor) + self._barrier() + + def _test_all_to_all_helper( + self, + group, + group_id, + rank, + cuda=False, + rank_to_GPU=None, + dtype=torch.float, + ): + if group_id is not None: + size = len(group) + in_splits = [i + 1 for i in group] + in_tensors = [ + torch.ones([in_splits[i], size], dtype=dtype) * rank + for i, _ in enumerate(group) + ] + out_tensors = [ 
+ torch.ones([(rank + 1), size], dtype=dtype) for _ in group + ] + expected_tensors = [ + torch.ones([rank + 1, size], dtype=dtype) * i for i in group + ] + if cuda: + in_tensors = [t.cuda(rank_to_GPU[rank][0]) for t in in_tensors] + expected_tensors = [ + t.cuda(rank_to_GPU[rank][0]) for t in expected_tensors + ] + out_tensors = [t.cuda(rank_to_GPU[rank][0]) for t in out_tensors] + dist.all_to_all(out_tensors, in_tensors, group=group_id) + for t1, t2 in zip(out_tensors, expected_tensors): + self.assertEqual(t1, t2) + self._barrier() + + @skip_but_pass_in_sandcastle_if( + BACKEND != "mpi", "Only MPI supports CPU all_to_all_single" + ) + def test_all_to_all_single_equal_split(self): + group, group_id, rank = self._init_global_test() + self._test_all_to_all_single_equal_split_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA all_to_all_single" + ) + @skip_if_no_gpu + def test_all_to_all_single_equal_split_cuda(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_all_to_all_single_equal_split_helper( + group, + group_id, + rank, + True, + rank_to_GPU, + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "mpi", "Only MPI supports CPU all_to_all_single" + ) + def test_all_to_all_single_equal_split_complex(self): + group, group_id, rank = self._init_global_test() + self._test_all_to_all_single_equal_split_helper( + group, group_id, rank, dtype=torch.cfloat + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA all_to_all_single" + ) + @skip_if_no_gpu + def test_all_to_all_single_equal_split_cuda_complex(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_all_to_all_single_equal_split_helper( + group, group_id, rank, True, rank_to_GPU, dtype=torch.cfloat + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND 
!= "mpi", "Only MPI supports CPU all_to_all_single" + ) + def test_all_to_all_single_unequal_split(self): + group, group_id, rank = self._init_global_test() + self._test_all_to_all_single_unequal_split_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA all_to_all_single" + ) + @skip_if_no_gpu + def test_all_to_all_single_unequal_split_cuda(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_all_to_all_single_unequal_split_helper( + group, + group_id, + rank, + True, + rank_to_GPU, + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "mpi", "Only MPI supports CPU all_to_all_single" + ) + def test_all_to_all_single_unequal_split_complex(self): + group, group_id, rank = self._init_global_test() + self._test_all_to_all_single_unequal_split_helper( + group, group_id, rank, dtype=torch.cfloat + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA all_to_all_single" + ) + @skip_if_no_gpu + def test_all_to_all_single_unequal_split_cuda_complex(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_all_to_all_single_unequal_split_helper( + group, + group_id, + rank, + True, + rank_to_GPU, + dtype=torch.cfloat, + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "mpi", "Only MPI supports all_to_all" + ) + def test_all_to_all(self): + group, group_id, rank = self._init_global_test() + self._test_all_to_all_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only NCCL supports CUDA all_to_all" + ) + @skip_if_rocm_multiprocess + def test_all_to_all_cuda(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_all_to_all_helper(group, group_id, rank, True, rank_to_GPU) + + 
@skip_but_pass_in_sandcastle_if( + BACKEND != "mpi", "Only MPI supports all_to_all" + ) + def test_all_to_all_complex(self): + group, group_id, rank = self._init_global_test() + self._test_all_to_all_helper(group, group_id, rank, dtype=torch.cfloat) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only NCCL supports CUDA all_to_all" + ) + @skip_if_rocm_multiprocess + def test_all_to_all_cuda_complex(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_all_to_all_helper( + group, group_id, rank, True, rank_to_GPU, dtype=torch.cfloat + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "mpi", "Only MPI supports CPU all_to_all_single" + ) + @skip_if_small_worldsize + def test_all_to_all_single_equal_split_group(self): + group, group_id, rank = self._init_group_test() + self._test_all_to_all_single_equal_split_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA all_to_all_single" + ) + @skip_if_no_gpu + @skip_if_small_worldsize + def test_all_to_all_single_equal_split_group_cuda(self): + group, group_id, rank = self._init_group_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_all_to_all_single_equal_split_helper( + group, + group_id, + rank, + True, + rank_to_GPU, + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "mpi", "Only MPI supports CPU all_to_all_single" + ) + @skip_if_small_worldsize + def test_all_to_all_single_unequal_split_group(self): + group, group_id, rank = self._init_group_test() + self._test_all_to_all_single_unequal_split_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA all_to_all_single" + ) + @skip_if_no_gpu + @skip_if_small_worldsize + def test_all_to_all_single_unequal_split_group_cuda(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = 
init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_all_to_all_single_unequal_split_helper( + group, + group_id, + rank, + True, + rank_to_GPU, + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "mpi", "Only MPI supports all_to_all" + ) + @skip_if_small_worldsize + def test_all_to_all_group(self): + group, group_id, rank = self._init_group_test() + self._test_all_to_all_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA all_to_all_single" + ) + @skip_if_small_worldsize + @skip_if_rocm_multiprocess + def test_all_to_all_group_cuda(self): + group, group_id, rank = self._init_group_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_all_to_all_helper(group, group_id, rank, True, rank_to_GPU) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "mpi", "Only MPI supports CPU all_to_all_single" + ) + def test_all_to_all_single_equal_split_full_group(self): + group, group_id, rank = self._init_full_group_test() + self._test_all_to_all_single_equal_split_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA all_to_all_single" + ) + @skip_if_no_gpu + def test_all_to_all_single_equal_split_full_group_cuda(self): + group, group_id, rank = self._init_full_group_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + self._test_all_to_all_single_equal_split_helper( + group, + group_id, + rank, + True, + rank_to_GPU, + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "mpi", "Only MPI supports CPU all_to_all_single" + ) + def test_all_to_all_single_unequal_split_full_group(self): + group, group_id, rank = self._init_full_group_test() + self._test_all_to_all_single_unequal_split_helper(group, group_id, rank) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", "Only Nccl supports CUDA all_to_all_single" + ) + @skip_if_no_gpu + def 
    @skip_but_pass_in_sandcastle_if(
        BACKEND != "mpi", "Only MPI supports all_to_all"
    )
    def test_all_to_all_full_group(self):
        group, group_id, rank = self._init_full_group_test()
        self._test_all_to_all_helper(group, group_id, rank)

    @skip_but_pass_in_sandcastle_if(
        BACKEND != "nccl", "Only NCCL supports CUDA all_to_all"
    )
    @skip_if_rocm_multiprocess
    def test_all_to_all_full_group_cuda(self):
        group, group_id, rank = self._init_full_group_test()
        rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
        self._test_all_to_all_helper(group, group_id, rank, True, rank_to_GPU)

    # BARRIER
    def _test_barrier_helper(
        self, group, group_id, rank, cuda=False, rank_to_GPU=None
    ):
        # Each rank in turn broadcasts a future timestamp, sleeps past it,
        # then releases the barrier; all other ranks check that the barrier
        # actually held them until after that timestamp.
        WAIT_TIME = 0.3  # seconds

        for dest in group:
            expected_time = torch.DoubleTensor(1).fill_(0.0)
            if cuda:
                expected_time = expected_time.cuda(rank_to_GPU[rank][0])
            if dest == rank:
                expected_time.fill_(time.time() + WAIT_TIME)
                dist.broadcast(expected_time, dest, group_id)
                time.sleep(WAIT_TIME + 0.1)  # sleep a little bit longer
                dist.barrier(group_id)
            else:
                dist.broadcast(expected_time, dest, group_id)
                dist.barrier(group_id)
                self.assertGreaterAlmostEqual(
                    float(time.time()),
                    float(expected_time[0]),
                    msg="destination rank: %d, my rank: %d" % (dest, rank)
                    + " (if you see this failure, please report in #14554)",
                )

        # Use higher timeout for the instance where the test runs
        # against a subgroup and uses a CUDA tensor for expected time.
        # The CUDA initialization for the participating processes can
        # take long enough for the barrier timeout to trigger on the
        # process that doesn't participate in the group.
        self._barrier(timeout=20)

    @skip_if_no_gpu
    @skip_but_pass_in_sandcastle_if(
        BACKEND == "mpi", "MPI doesn't supports GPU barrier"
    )
    @skip_but_pass_in_sandcastle_if(
        BACKEND == "ucc" and IS_SANDCASTLE, "Skipped internally"
    )
    def test_barrier_cuda(self):
        group, group_id, rank = self._init_global_test()
        rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
        self._test_barrier_helper(group, group_id, rank, True, rank_to_GPU)

    @skip_if_small_worldsize
    @skip_if_no_gpu
    @skip_but_pass_in_sandcastle_if(
        BACKEND == "mpi", "MPI doesn't supports GPU barrier"
    )
    def test_barrier_group_cuda(self):
        group, group_id, rank = self._init_group_test()
        rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
        self._test_barrier_helper(group, group_id, rank, True, rank_to_GPU)

    @skip_if_small_worldsize
    @skip_if_no_gpu
    @skip_but_pass_in_sandcastle_if(
        BACKEND == "mpi", "MPI doesn't supports GPU barrier"
    )
    def test_barrier_full_group_cuda(self):
        group, group_id, rank = self._init_full_group_test()
        rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
        self._test_barrier_helper(group, group_id, rank, True, rank_to_GPU)

    @skip_but_pass_in_sandcastle_if(
        BACKEND in DistTestCases.skip_collective["cpu barrier"],
        f"{BACKEND} does not support CPU barrier",
    )
    def test_barrier(self):
        group, group_id, rank = self._init_global_test()
        self._test_barrier_helper(group, group_id, rank)

    @skip_if_small_worldsize
    @skip_but_pass_in_sandcastle_if(
        BACKEND in DistTestCases.skip_collective["cpu barrier"],
        f"{BACKEND} does not support CPU barrier",
    )
    def test_barrier_group(self):
        group, group_id, rank = self._init_group_test()
        self._test_barrier_helper(group, group_id, rank)

    @skip_but_pass_in_sandcastle_if(
        BACKEND in DistTestCases.skip_collective["cpu barrier"],
        f"{BACKEND} does not support CPU barrier",
    )
    def test_barrier_full_group(self):
        group, group_id, rank = self._init_full_group_test()
        self._test_barrier_helper(group, group_id, rank)
    def _model_step(self, model):
        # Apply each accumulated gradient as a raw additive update, then
        # drop the gradient so the next backward starts fresh.
        for param in model.parameters():
            if param.grad is not None:
                with torch.no_grad():
                    param += param.grad
                param.grad = None

    def _model_step_with_zero_grad(self, model):
        # Same additive update, but zero the gradient in place instead of
        # setting it to None (exercises the grad-as-view code path).
        for param in model.parameters():
            if param.grad is not None:
                with torch.no_grad():
                    param += param.grad
                param.grad.requires_grad_(False)
                param.grad.zero_()

    def _prepare_dummy_data(self, local_bs):
        # global_bs for DDP should be divisible by WORLD_SIZE
        world_size = int(os.environ["WORLD_SIZE"])
        global_bs = world_size * local_bs
        input_cpu = torch.randn(global_bs, 2)
        target = torch.randn(global_bs, 4)
        loss = nn.MSELoss()
        return global_bs, input_cpu, target, loss

    # END TO END TEST FOR DISTRIBUTEDDATAPARALLEL
    def _test_DDP_helper(
        self, model, input_var, target, loss, scale_factor=1.0, memory_format=None
    ):
        # One forward/backward step; scale_factor compensates for the DDP
        # shard being a fraction of the global batch.
        model.train()
        output = model(input_var)
        l = loss(output, target) * scale_factor
        l.backward()
        if memory_format is not None:
            self.assertTrue(output.is_contiguous(memory_format=memory_format))

    def _assert_equal_param(self, param_gpu, param_DDP):
        self.assertEqual(len(param_gpu), len(param_DDP))
        for p_gpu, p_DDP in zip(param_gpu, param_DDP):
            self.assertEqual(p_gpu, p_DDP)

    def _test_DDP_niter(
        self,
        model_base,
        model_DDP,
        input,
        target,
        loss,
        local_bs,
        rank,
        batch_size,
        test_save,
        offset=None,
        world_size=0,
        zero_grad=False,
        memory_format=None,
        n_iter=5,
    ):
        # Train a plain model and a DDP-wrapped copy for n_iter steps and
        # assert their parameters stay identical after every step.
        for idx in range(n_iter):
            # single cpu/gpu training
            self._test_DDP_helper(
                model_base, input, target, loss, memory_format=memory_format
            )

            if offset is None:
                offset = rank * local_bs

            # DDP training, DDP scatters subsets of input_cpu to nodes/GPUs
            self._test_DDP_helper(
                model_DDP,
                input[offset : offset + local_bs],
                target[offset : offset + local_bs],
                loss,
                world_size * local_bs / batch_size if world_size != 0 else 1,
                memory_format=memory_format,
            )

            # Update weights and run a second iteration to shake out errors
            if zero_grad:
                self._model_step_with_zero_grad(model_base)
                self._model_step_with_zero_grad(model_DDP)
            else:
                self._model_step(model_base)
                self._model_step(model_DDP)
            self._assert_equal_param(
                list(model_base.parameters()), list(model_DDP.module.parameters())
            )

            # Shuffle the input so that DDP input is different
            input = input[torch.randperm(batch_size)]

            # save the model in the middle and reload
            if test_save and idx == 2 and INIT_METHOD.startswith("file://"):
                with tempfile.NamedTemporaryFile() as tmp:
                    if sys.platform == "win32":
                        torch.save(model_DDP, tmp)
                        tmp.seek(0)
                        # weights_only=False as this is legacy code that saves the model
                        model_DDP = torch.load(tmp, weights_only=False)
                    else:
                        torch.save(model_DDP, tmp.name)
                        # weights_only=False as this is legacy code that saves the model
                        model_DDP = torch.load(tmp.name, weights_only=False)

        with tempfile.TemporaryFile() as tmp_file:
            torch.save(model_DDP, tmp_file)
            tmp_file.seek(0)
            # weights_only=False as this is legacy code that saves the model
            saved_model = torch.load(tmp_file, weights_only=False)
        for k in model_DDP.state_dict():
            self.assertEqual(model_DDP.state_dict()[k], saved_model.state_dict()[k])

    def _test_DistributedDataParallel(
        self,
        gpu_subset,
        rank,
        output_device=None,
        gradient_as_bucket_view=False,
        static_graph=False,
        set_static_graph_twice=False,
    ):
        # Run a simple end to end DDP model, use result of single node model
        # as baseline

        # cpu training setup
        model = DDP_NET

        # single gpu training setup
        model_gpu = copy.deepcopy(model)
        model_gpu.cuda(gpu_subset[0])

        # DDP training setup
        model_DDP = copy.deepcopy(model)
        model_DDP.cuda(gpu_subset[0])
        model_DDP = nn.parallel.DistributedDataParallel(
            model_DDP,
            device_ids=gpu_subset,
            gradient_as_bucket_view=gradient_as_bucket_view,
            static_graph=static_graph,
        )

        if set_static_graph_twice:
            model_DDP._set_static_graph()

        # test serializable/unserializable
        with tempfile.NamedTemporaryFile() as tmp:
            if sys.platform == "win32":
                torch.save(model_DDP, tmp)
                tmp.seek(0)
                # weights_only=False as this is legacy code that saves the model
                model_DDP = torch.load(tmp, weights_only=False)
            else:
                torch.save(model_DDP, tmp.name)
                # weights_only=False as this is legacy code that saves the model
                model_DDP = torch.load(tmp.name, weights_only=False)

        # dummy data initialization
        local_bs = len(gpu_subset)
        global_bs, input_cpu, target, loss = self._prepare_dummy_data(local_bs)

        # check two model parameters over 5 iterations
        self._test_DDP_niter(
            model_gpu,
            model_DDP,
            input_cpu.cuda(gpu_subset[0]),
            target.cuda(gpu_subset[0]),
            loss,
            local_bs,
            rank,
            global_bs,
            True,
        )
        self._barrier()
    def _test_DistributedDataParallelCPU(self, gradient_as_bucket_view=False):
        # Run a simple end to end DDP-CPU model, use result of single node
        # model as baseline
        group, group_id, rank = self._init_global_test()

        # cpu training setup
        model_base = DDP_NET

        # DDP-CPU training setup
        model_DDP = copy.deepcopy(model_base)
        model_DDP = nn.parallel.DistributedDataParallel(
            model_DDP, gradient_as_bucket_view=gradient_as_bucket_view
        )

        # dummy data initialization
        local_bs = 2
        global_bs, input_cpu, target, loss = self._prepare_dummy_data(local_bs)

        # check two model parameters over 5 iterations
        self._test_DDP_niter(
            model_base,
            model_DDP,
            input_cpu,
            target,
            loss,
            local_bs,
            rank,
            global_bs,
            False,
            zero_grad=True,
        )
        self._barrier()

        return model_DDP

    @skip_but_pass_in_sandcastle_if(
        BACKEND == "nccl", "nccl does not support DDP on CPU models"
    )
    def test_DistributedDataParallelCPU(self):
        self._test_DistributedDataParallelCPU()

    @skip_but_pass_in_sandcastle_if(
        BACKEND == "nccl", "nccl does not support DDP on CPU models"
    )
    def test_DistributedDataParallelCPU_grad_is_view(self):
        self._test_DistributedDataParallelCPU(gradient_as_bucket_view=True)

    @skip_but_pass_in_sandcastle_if(
        BACKEND not in DistTestCases.backend_feature["ddp"],
        f"The {BACKEND} backend does not support DistributedDataParallel",
    )
    def test_DistributedDataParallel_requires_grad(self):
        # a module without gradients shouldn't be accepted
        self.assertRaises(
            RuntimeError, lambda: nn.parallel.DistributedDataParallel(nn.Module())
        )
        self._barrier()

    @skip_but_pass_in_sandcastle_if(
        BACKEND not in DistTestCases.backend_feature["ddp"],
        f"The {BACKEND} backend does not support DistributedDataParallel",
    )
    @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"]))
    def test_ddp_zero_output_features(self):
        # DDP construction must tolerate a layer with zero output features.
        class ToyModel(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.net1 = nn.Linear(10, 10)
                self.relu = nn.ReLU()
                self.net2 = nn.Linear(10, 0)

        model = ToyModel().to(self.rank)
        ddp_model = nn.parallel.DistributedDataParallel(
            model, device_ids=[self.rank]
        )

    @skip_but_pass_in_sandcastle_if(BACKEND == "nccl", "Gloo-only test")
    def test_ddp_create_graph(self):
        class Model(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.p = nn.Parameter(torch.tensor(1.0))

            def forward(self):
                return self.p.pow(2)

        model = Model()
        ddp_model = torch.nn.parallel.DistributedDataParallel(model)
        for _ in range(6):
            # Verify DDP doesn't throw when ran with create_graph=True.
            # Although we do warn about potential issues, please see
            # https://github.com/pytorch/pytorch/issues/63929 for details.
            ddp_model().backward(create_graph=True)
            # grad tensors should require grad.
            self.assertTrue(
                all(param.requires_grad for param in ddp_model.parameters())
            )
    @skip_but_pass_in_sandcastle_if(
        BACKEND not in DistTestCases.backend_feature["ddp"],
        f"The {BACKEND} backend does not support DistributedDataParallel",
    )
    @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"]))
    def test_DistributedDataParallel_non_default_stream(self):
        # Gradient averaging must be correct even when DDP runs on a
        # non-default CUDA stream.
        stream = torch.cuda.Stream(self.rank)
        rank = self.rank
        with torch.cuda.stream(stream):
            net = torch.nn.parallel.DistributedDataParallel(
                torch.nn.Linear(1, 1, bias=False).cuda(rank), device_ids=[rank]
            )
            for i in range(1000):
                # Clear gradients manually
                grad = net.module.weight.grad
                if grad is not None:
                    grad.requires_grad_(False)
                    grad.zero_()
                # Forward + BW
                batch = torch.tensor([rank]).float().cuda(rank)
                loss = net(batch).sum()
                loss.backward()
                # For each worker, the gradient on the weight should be worker_rank.
                grad = net.module.weight.grad
                avg = grad.clone()
                # All-reducing the gradient averages should give us the gradient
                # average. If not, then one of the workers has not correctly
                # written back the averaged gradient before this all-reduce call.
                dist.all_reduce(avg)
                world_size = int(os.environ["WORLD_SIZE"])
                avg.div_(world_size)
                expected_grad = sum(i for i in range(world_size)) / world_size
                self.assertEqual(
                    avg[0, 0],
                    expected_grad,
                    msg=f"Expected gradient of {expected_grad} but got {avg} on rank {self.rank}",
                )

    @skip_but_pass_in_sandcastle_if(
        BACKEND not in DistTestCases.backend_feature["cuda"],
        f"The {BACKEND} backend does not support DDP communication hook on CUDA devices",
    )
    @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"]))
    def test_ddp_comm_hook_logging(self):
        # Registering a comm hook (Python or C++ builtin) must be reflected
        # in DDP logging data; with no hook, the field stays unset/empty.
        hooks = [
            default.allreduce_hook,
            default.fp16_compress_hook,
            powerSGD.powerSGD_hook,
            powerSGD.batched_powerSGD_hook,
            quantization_hooks.quantization_pertensor_hook,
            quantization_hooks.quantization_perchannel_hook,
        ]

        cpp_builtin_hooks = [
            dist.BuiltinCommHookType.ALLREDUCE,
            dist.BuiltinCommHookType.FP16_COMPRESS,
        ]

        for hook in hooks:
            ddp_model = torch.nn.parallel.DistributedDataParallel(
                torch.nn.Linear(1, 1, bias=False).cuda(self.rank),
                device_ids=[self.rank],
            )
            ddp_logging_data = ddp_model._get_ddp_logging_data()
            # Hook not registered yet, so should be empty
            self.assertEqual(ddp_logging_data.get("comm_hook"), None)
            ddp_model.register_comm_hook(None, hook)
            ddp_logging_data = ddp_model._get_ddp_logging_data()
            self.assertEqual(ddp_logging_data.get("comm_hook"), hook.__qualname__)

        for hook in cpp_builtin_hooks:
            ddp_model = torch.nn.parallel.DistributedDataParallel(
                torch.nn.Linear(1, 1, bias=False).cuda(self.rank),
                device_ids=[self.rank],
            )
            ddp_logging_data = ddp_model._get_ddp_logging_data()
            # Hook not registered yet, so should be empty
            self.assertEqual(ddp_logging_data.get("comm_hook"), None)
            ddp_model._register_builtin_comm_hook(hook)
            ddp_logging_data = ddp_model._get_ddp_logging_data()
            self.assertEqual(ddp_logging_data.get("comm_hook"), str(hook))

        # No hook registered
        ddp_model = torch.nn.parallel.DistributedDataParallel(
            torch.nn.Linear(1, 1, bias=False).cuda(self.rank),
            device_ids=[self.rank],
        )
        ddp_logging_data = ddp_model._get_ddp_logging_data()
        # Hook not registered yet, so should be empty
        self.assertEqual(ddp_logging_data.get("comm_hook"), None)
        # After second forward pass, hook should still be empty string
        for i in range(2):
            inp = torch.ones(1, 1, device=self.rank)
            loss = ddp_model(inp).sum()
            loss.backward()

        ddp_logging_data = ddp_model._get_ddp_logging_data()
        # Note: DETAIL debug mode logs DDP logging data to stdout and
        # thus accesses std::map, which fills in a default value for the
        # type if it didn't exist.
        self.assertEqual(ddp_logging_data.get("comm_hook", ""), "")
    def _test_ddp_hook_with_optimizer_parity(
        self,
        grad_as_bucket_view,
        static_graph,
        optim_cls,
        optimize_subset,
        *functional_optim_args,
        **functional_optim_kwargs,
    ):
        # Verify that a fused optimizer registered via _register_fused_optim
        # produces the same parameters as running the optimizer step manually
        # after backward.
        rank = self.rank
        torch.cuda.set_device(rank)
        torch.manual_seed(rank)
        torch.cuda.manual_seed(rank)
        models_to_test = [
            (LargeNet(), torch.randn(1, 1000).cuda()),
        ]
        if HAS_TORCHVISION:
            models_to_test.append(
                (torchvision.models.resnet50(), torch.randn(1, 3, 3, 1000).cuda())
            )
        for (model, inp) in models_to_test:
            # Enable determinism in cudnn operators
            with torch.backends.cudnn.flags(
                enabled=True, deterministic=True, benchmark=False
            ):
                # Create DDP model that runs optimizer in fused fashion.
                ddp_model_with_optimizer_hook = (
                    torch.nn.parallel.DistributedDataParallel(
                        copy.deepcopy(model).cuda(),
                        device_ids=[self.rank],
                        gradient_as_bucket_view=grad_as_bucket_view,
                        static_graph=static_graph,
                    )
                )

                # Create DDP model with no hook that does optimizer after
                # backward.
                ddp_model_with_no_hook = torch.nn.parallel.DistributedDataParallel(
                    copy.deepcopy(model).cuda(),
                    device_ids=[self.rank],
                    gradient_as_bucket_view=grad_as_bucket_view,
                    static_graph=static_graph,
                )
                hook_params = ddp_model_with_optimizer_hook.parameters()
                no_hook_params = ddp_model_with_no_hook.parameters()
                if optimize_subset:
                    # Only optimize the first parameter of each model.
                    hook_params = list(hook_params)
                    no_hook_params = list(no_hook_params)
                    self.assertGreater(len(hook_params), 0)
                    hook_params = [hook_params[0]]
                    no_hook_params = [no_hook_params[0]]

                # Register a fused optimizer that will run optimizer in step
                # with allreduce.

                if optimize_subset:
                    # API where optim_params is specified.
                    ddp_model_with_optimizer_hook._register_fused_optim(
                        optim_cls,
                        *functional_optim_args,
                        optim_params=hook_params,
                        **functional_optim_kwargs,
                    )
                else:
                    # API where optim_params is omitted
                    ddp_model_with_optimizer_hook._register_fused_optim(
                        optim_cls,
                        *functional_optim_args,
                        **functional_optim_kwargs,
                    )

                optimizer_no_hook = optim_cls(
                    no_hook_params,
                    *functional_optim_args,
                    **functional_optim_kwargs,
                )

                # Verify parameters are equal initially.
                for hook_param, allreduce_param in zip(
                    ddp_model_with_optimizer_hook.parameters(),
                    ddp_model_with_no_hook.parameters(),
                ):
                    self.assertEqual(hook_param, allreduce_param)

                # Save old parameters to later verify optimizer modified them.
                opt_hook_init_params = copy.deepcopy(
                    list(ddp_model_with_optimizer_hook.parameters())
                )

                # Run optimizer with hook model.
                for i in range(6):
                    ddp_model_with_optimizer_hook.zero_grad()
                    out = ddp_model_with_optimizer_hook(inp)
                    loss = out.sum()
                    loss.backward()

                dist.barrier()

                # Run regular model.
                for i in range(6):
                    ddp_model_with_no_hook.zero_grad()
                    out = ddp_model_with_no_hook(inp)
                    loss = out.sum()
                    loss.backward()
                    optimizer_no_hook.step()

                dist.barrier()

                # Now verify parameters are equal.
                for hook_param, allreduce_param in zip(
                    ddp_model_with_optimizer_hook.parameters(),
                    ddp_model_with_no_hook.parameters(),
                ):
                    self.assertEqual(hook_param, allreduce_param)

                # Verify optimizer modified appropriate parameter set,
                # otherwise they'd be trivially equal above.
                if optimize_subset:
                    self.assertNotEqual(
                        opt_hook_init_params[0],
                        next(iter(ddp_model_with_optimizer_hook.parameters())),
                    )
                    # Untouched params should be equal
                    self.assertEqual(
                        opt_hook_init_params[1:],
                        list(ddp_model_with_optimizer_hook.parameters())[1:],
                    )
                else:
                    self.assertNotEqual(
                        opt_hook_init_params,
                        list(ddp_model_with_optimizer_hook.parameters()),
                    )
                dist.barrier()

    """
    # Commenting out the following 3 tests as they cause Sandcastle jobs to fail
    # Failure signature:
    # AttributeError: type object 'TestDistBackendWithSpawn' has no attribute 'test_ddp_hook_with_optimizer_parity_adamw

    from torch.testing._internal.common_utils import parametrize

    @skip_but_pass_in_sandcastle_if(
        BACKEND == "nccl" or BACKEND == "ucc",
        "Issues with async error handling, see https://github.com/pytorch/pytorch/issues/73259",
    )
    @skip_if_lt_x_gpu(2)
    @parametrize("grad_as_bucket_view", [True, False])
    @parametrize("static_graph", [True, False])
    @parametrize("optimize_subset", [True, False])
    def test_ddp_hook_with_optimizer_parity_adamw(
        self,
        grad_as_bucket_view,
        static_graph,
        optimize_subset,
    ):
        adamw_lr = 1e-2
        adamw_betas = (0.9, 0.99)
        adamw_eps = 1e-6
        self._test_ddp_hook_with_optimizer_parity(
            grad_as_bucket_view,
            static_graph,
            torch.optim.AdamW,
            optimize_subset,
            adamw_lr,
            betas=adamw_betas,
            eps=adamw_eps,
        )

    @skip_but_pass_in_sandcastle_if(
        BACKEND == "nccl" or BACKEND == "ucc",
        "Issues with async error handling, see https://github.com/pytorch/pytorch/issues/73259",
    )
    @skip_if_lt_x_gpu(2)
    @parametrize("optimize_subset", [True, False])
    def test_ddp_hook_with_optimizer_parity_adam(self, optimize_subset):
        adam_lr = 1e-2
        adam_betas = (0.9, 0.99)
        adam_eps = 1e-6
        self._test_ddp_hook_with_optimizer_parity(
            True,  # grad as bucket view
            False,  # static graph
            torch.optim.Adam,
            optimize_subset,
            adam_lr,
            betas=adam_betas,
            eps=adam_eps,
        )

    @skip_but_pass_in_sandcastle_if(
        BACKEND == "nccl" or BACKEND == "ucc",
        "Issues with async error handling, see https://github.com/pytorch/pytorch/issues/73259",
    )
    @skip_if_lt_x_gpu(2)
    @parametrize("optimize_subset", [True, False])
    def test_ddp_hook_with_optimizer_parity_sgd(self, optimize_subset):
        sgd_lr = 1e-2
        sgd_momentum = 0.9
        sgd_weight_decay = 0.01
        # Not testing grad_as_bucket_view and static_graph as they are
        # tested in AdamW test above.
        self._test_ddp_hook_with_optimizer_parity(
            True,  # grad as bucket view
            False,  # static_graph
            torch.optim.SGD,
            optimize_subset,
            sgd_lr,
            momentum=sgd_momentum,
            weight_decay=sgd_weight_decay,
        )
    """
    @skip_if_lt_x_gpu(2)
    def test_get_data_parallel_params(self):
        # _get_data_parallel_params must exclude parameters registered via
        # _set_params_and_buffers_to_ignore_for_model.
        torch.cuda.set_device(self.rank)
        model = TwoLinLayerNet().cuda()
        # Parameters to ignore are in the format {module_name}.{param_name}
        params_to_ignore = ["a.weight"]
        torch.nn.parallel.DistributedDataParallel._set_params_and_buffers_to_ignore_for_model(
            model, params_to_ignore
        )
        ddp_model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[self.rank]
        )
        dp_params = torch.nn.parallel.DistributedDataParallel._get_data_parallel_params(
            model, named_params=True
        )
        for name, _ in dp_params:
            self.assertNotEqual(f"module.{params_to_ignore[0]}", name)

        # test named_params=False, just check if returns the expected
        # no of parameters.
        num_ddp_params = len(list(model.parameters())) - 1
        count = 0
        dp_params = torch.nn.parallel.DistributedDataParallel._get_data_parallel_params(model, named_params=False)
        for _ in dp_params:
            count += 1
        self.assertEqual(count, num_ddp_params)

    def _test_ddp_apply_optim_in_backward(
        self,
        optim_cls,
        optim_kwargs,
        init_before,
        gradient_as_bucket_view=True,
    ):
        # Compare a regular DDP model + explicit optimizer.step() against a
        # DDP model whose optimizer runs inside backward
        # (_apply_optimizer_in_backward); parameters must match each step.
        # Need to seed to ensure inputs are unique across rank. Otherwise,
        # allreduce won't have any effect.
        torch.manual_seed(self.rank)
        torch.cuda.manual_seed(self.rank)
        torch.cuda.set_device(self.rank)

        # Test a simple linear as well as a ResNet model.
        models_to_test = [
            nn.Sequential(nn.Linear(3, 3), nn.Linear(3, 3), nn.Linear(3, 3)).cuda()
        ]
        if HAS_TORCHVISION:
            models_to_test.append(torchvision.models.resnet50().cuda())

        for j, model in enumerate(models_to_test):
            model_optim_in_bwd = copy.deepcopy(model)
            model = nn.parallel.DistributedDataParallel(
                model,
                device_ids=[self.rank],
                gradient_as_bucket_view=gradient_as_bucket_view,
            )
            optim = optim_cls(model.parameters(), **optim_kwargs)
            if init_before:
                _apply_optimizer_in_backward(
                    optimizer_class=optim_cls,
                    params=model_optim_in_bwd.parameters(),
                    optimizer_kwargs=optim_kwargs,
                )
            model_optim_in_bwd = nn.parallel.DistributedDataParallel(
                model_optim_in_bwd,
                device_ids=[self.rank],
                gradient_as_bucket_view=gradient_as_bucket_view,
            )
            if not init_before:
                _apply_optimizer_in_backward(
                    optimizer_class=optim_cls,
                    params=model_optim_in_bwd.parameters(),
                    optimizer_kwargs=optim_kwargs,
                )

            for p1, p2 in zip(model.parameters(), model_optim_in_bwd.parameters()):
                self.assertEqual(p1, p2, "Parameters not initially equal!")
            # Enable determinism in cudnn operators
            with torch.backends.cudnn.flags(
                enabled=True, deterministic=True, benchmark=False
            ):
                for i in range(8):
                    inp = (
                        torch.randn(1, 3, 1000, 1000, device="cuda")
                        if j == 1
                        else torch.randn(10, 3, device="cuda")
                    )
                    model(inp).sum().backward()
                    optim.step()
                    model_optim_in_bwd(
                        inp
                    ).sum().backward()  # runs optimizer as well
                    for p1, p2 in zip(
                        model.parameters(), model_optim_in_bwd.parameters()
                    ):
                        self.assertEqual(
                            p1, p2, f"Params not equal at iteration {i}"
                        )
                        self.assertTrue(
                            p2.grad is None,
                            f"Optim in backward grad is not None at {i}",
                        )

                    # set_to_none for regular optimizer to match in backward
                    # case.
                    optim.zero_grad(set_to_none=True)
    @skip_if_lt_x_gpu(2)
    def test_ddp_apply_optim_in_backward(self):
        for optim_cls, init_before in itertools.product(
            [torch.optim.SGD, torch.optim.Adam], [True, False]
        ):
            with self.subTest(optim_cls=optim_cls):
                self._test_ddp_apply_optim_in_backward(
                    optim_cls=optim_cls,
                    optim_kwargs={"lr": 0.03},
                    init_before=init_before,
                )

    @skip_if_lt_x_gpu(2)
    def test_ddp_apply_optim_in_backward_grad_as_bucket_view_false(self):
        for init_before in [True, False]:
            self._test_ddp_apply_optim_in_backward(
                optim_cls=torch.optim.SGD,
                optim_kwargs={"lr": 0.03},
                init_before=init_before,
                gradient_as_bucket_view=False,
            )

    @skip_if_lt_x_gpu(2)
    def test_ddp_apply_optim_in_backward_ignored_params(self):
        # Ignored params skip allreduce, so their in-backward optimizer runs
        # on purely local gradients and they diverge across ranks.
        torch.cuda.set_device(self.rank)
        for init_before in [True, False]:
            with self.subTest(init_before=init_before):
                torch.manual_seed(self.rank)
                torch.cuda.manual_seed(self.rank)
                model = TwoLinLayerNet()
                # Parameters to ignore are in the format {module_name}.{param_name}
                params_to_ignore = ["a.weight"]
                torch.nn.parallel.DistributedDataParallel._set_params_and_buffers_to_ignore_for_model(
                    model, params_to_ignore
                )
                if init_before:
                    _apply_optimizer_in_backward(
                        optimizer_class=torch.optim.SGD,
                        params=model.parameters(),
                        optimizer_kwargs={"lr": 0.03},
                    )
                net = torch.nn.parallel.DistributedDataParallel(
                    model.cuda(self.rank),
                    device_ids=[self.rank],
                )
                if not init_before:
                    _apply_optimizer_in_backward(
                        optimizer_class=torch.optim.SGD,
                        params=model.parameters(),
                        optimizer_kwargs={"lr": 0.03},
                    )
                inp = torch.randn(1, 10)
                a, b = net(inp)
                (a.transpose(0, 1) @ b).sum().backward()
                # a.weight did not go through allreduce, so optimizer acted on local
                # gradient, which should be different across ranks. Remaining params
                # should be equal.
                models = [None for _ in range(dist.get_world_size())]
                dist.all_gather_object(models, model)
                rank0_model, remainder = models[0], models[1:]
                for m in remainder:
                    self.assertNotEqual(rank0_model.a.weight, m.a.weight)
                    self.assertEqual(
                        list(rank0_model.b.parameters()), list(m.b.parameters())
                    )
                    self.assertEqual(rank0_model.a.bias, m.a.bias)

    def _get_fp16_config(self) -> _MixedPrecision:
        # Shared fp16 everywhere (params, reductions, buffers) config used by
        # the native mixed precision tests below.
        return _MixedPrecision(
            param_dtype=torch.float16,
            reduce_dtype=torch.float16,
            buffer_dtype=torch.float16,
        )

    @skip_if_lt_x_gpu(2)
    def test_ddp_native_mixed_precision_ignored_params(self):
        rank = self.rank
        torch.manual_seed(rank)
        torch.cuda.manual_seed(rank)
        torch.cuda.set_device(rank)
        model = TwoLinLayerNet()
        model.register_buffer("buffer", torch.ones(5))
        # Parameters to ignore are in the format {module_name}.{param_name}
        to_ignore = ["a.weight", "buffer"]
        torch.nn.parallel.DistributedDataParallel._set_params_and_buffers_to_ignore_for_model(
            model, to_ignore,
        )
        mp_config = self._get_fp16_config()
        net = torch.nn.parallel.DistributedDataParallel(
            model.to(rank),
            device_ids=[rank],
            mixed_precision=mp_config,
            gradient_as_bucket_view=True,
        )
        to_ignore = [f"module.{name}" for name in to_ignore]
        expected_ignored = len(to_ignore)
        n_ignored = 0
        # ignored params should not have _mp_param or _fp_param fields.
        for (n, p) in itertools.chain(net.named_parameters(), net.named_buffers()):
            if n in to_ignore:
                n_ignored += 1
                self.assertFalse(hasattr(p, '_mp_param'))
                self.assertFalse(hasattr(p, '_fp_param'))
            else:
                self.assertEqual(mp_config.param_dtype, p._mp_param.dtype)
                self.assertEqual(torch.float32, p._fp_param.dtype)

        self.assertEqual(expected_ignored, n_ignored)
+ for (n, p) in itertools.chain(net.named_parameters(), net.named_buffers()): + if n in to_ignore: + n_ignored += 1 + self.assertFalse(hasattr(p, '_mp_param')) + self.assertFalse(hasattr(p, '_fp_param')) + else: + self.assertEqual(mp_config.param_dtype, p._mp_param.dtype) + self.assertEqual(torch.float32, p._fp_param.dtype) + + self.assertEqual(expected_ignored, n_ignored) + + def _test_ddp_native_mixed_precision( + self, gradient_as_bucket_view, set_grad_to_none + ): + rank = self.rank + torch.manual_seed(rank) + torch.cuda.manual_seed(rank) + torch.cuda.set_device(rank) + inp = torch.randn(10, 1) + mp_config = self._get_fp16_config() + + class MyModel(torch.nn.Module): + def __init__(self) -> None: + super().__init__() + self.m = torch.nn.Linear(1, 5) + self.register_buffer('buffer', torch.randn(1, 2)) + self.p = torch.nn.Parameter( + torch.randn(10, 5), requires_grad=False + ) + + def forward(self_, x): # noqa: B902 + params = self_.m.parameters() + for p in params: + self.assertEqual(mp_config.param_dtype, p.dtype) + + self.assertEqual(self_.buffer.dtype, mp_config.buffer_dtype) + + self.assertEqual(mp_config.param_dtype, x.dtype) + return self_.m(x) + self_.p + + m = MyModel() + + net = torch.nn.parallel.DistributedDataParallel( + m.to(rank), + device_ids=[rank], + mixed_precision=mp_config, + gradient_as_bucket_view=gradient_as_bucket_view, + ) + # Buffers are casted in constructor. + self.assertEqual(net.module.buffer.dtype, mp_config.buffer_dtype) + # Each param should have an mp_param in the lower precision, and + # an fp_param in the higher precision. + for p in net.parameters(): + self.assertEqual(mp_config.param_dtype, p._mp_param.dtype) + self.assertEqual(torch.float32, p._fp_param.dtype) + + for i in range(6): + loss = net(inp).sum() + loss.backward() + # Verify gradient synchronization and params and grads are fp32. 
+ for n, param in net.named_parameters(): + self.assertEqual(param.dtype, torch.float32) + if param.grad is None: + assert n == 'module.p' # Only param that doesn't require grad + else: + self.assertEqual(param.grad.dtype, torch.float32) + tensor_list = [ + torch.zeros_like(param.grad) + for _ in range(dist.get_world_size(net.process_group)) + ] + dist.all_gather(tensor_list, param.grad) + g, rest = tensor_list[0], tensor_list[1:] + self.assertEqual(g.dtype, torch.float32) + for g_ in rest: + self.assertEqual(g_.dtype, torch.float32) + self.assertEqual(g, g_) + net.zero_grad(set_to_none=set_grad_to_none) + + @skip_if_lt_x_gpu(2) + def test_ddp_native_mixed_precision_no_grad_as_bucket_view_no_set_grad_none(self): + self._test_ddp_native_mixed_precision( + gradient_as_bucket_view=False, + set_grad_to_none=False, + ) + + @skip_if_lt_x_gpu(2) + def test_ddp_native_mixed_precision_grad_as_bucket_view_no_set_grad_none(self): + self._test_ddp_native_mixed_precision( + gradient_as_bucket_view=True, + set_grad_to_none=False, + ) + + @skip_if_lt_x_gpu(2) + def test_ddp_native_mixed_precision_grad_as_bucket_view_set_grad_to_none(self): + self._test_ddp_native_mixed_precision( + gradient_as_bucket_view=True, set_grad_to_none=True + ) + + @skip_if_lt_x_gpu(2) + def test_ddp_native_mixed_precision_no_grad_as_bucket_view_set_grad_to_none(self): + self._test_ddp_native_mixed_precision( + gradient_as_bucket_view=True, set_grad_to_none=True + ) + + def _test_ddp_hook_parity(self, state, hook, num_validated_iters=100): + rank = self.rank + m = torch.nn.Linear(1, 5) + try: + process_group = state.process_group + except AttributeError: + process_group = state + + net_with_hook = torch.nn.parallel.DistributedDataParallel( + copy.deepcopy(m).to(rank), + device_ids=[rank], + process_group=process_group, + ) + net_with_hook.register_comm_hook(state=state, hook=hook) + net_without_hook = torch.nn.parallel.DistributedDataParallel( + copy.deepcopy(m).to(rank), + device_ids=[rank], + 
process_group=process_group, + ) + for i in range(100): + # Clear gradients manually. + for g in [ + net_without_hook.module.weight.grad, + net_with_hook.module.weight.grad, + ]: + if g is not None: + g.requires_grad_(False) + g.zero_() + # Forward + BW + batch = torch.tensor([rank]).float().cuda(rank) + loss = net_without_hook(batch).sum() + loss.backward() + # For each worker, the gradient on the weight should be worker_rank. + grad = net_without_hook.module.weight.grad + avg = grad.clone() + expected_grad = ( + sum(i for i in range(dist.get_world_size())) / dist.get_world_size() + ) + loss_hook = net_with_hook(batch).sum() + loss_hook.backward() + grad_hook = net_with_hook.module.weight.grad + avg_hook = grad_hook.clone() + + if i < num_validated_iters: + # Verify hook grad with expected. + self.assertEqual( + avg_hook[0, 0].item(), + expected_grad, + msg=f"Expected hook grad of {expected_grad} but got {avg_hook[0, 0]}", + ) + # Verify hook grad with vanilla allreduce + self.assertEqual( + avg_hook[0, 0], + avg[0, 0], + msg=f"Expected hook grad to be close to allreduce {avg[0, 0]}, but got {avg_hook[0, 0]}", + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["cuda"], + f"The {BACKEND} backend does not support DDP communication hook on CUDA devices", + ) + @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"])) + def test_ddp_hook_parity_allreduce(self): + self._test_ddp_hook_parity(state=None, hook=default.allreduce_hook) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["cuda"], + f"The {BACKEND} backend does not support DDP communication hook on CUDA devices", + ) + @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"])) + def test_ddp_hook_parity_allreduce_process_group(self): + # process_group is passed in to both DDP and comm. 
hook + world_size = dist.get_world_size() + rank_to_GPU = init_multigpu_helper(world_size, BACKEND) + gpus = [rank_to_GPU[int(r)][0] for r in range(world_size)] + process_group = torch.distributed.new_group(gpus) + self._test_ddp_hook_parity(state=process_group, hook=default.allreduce_hook) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["cuda"], + f"The {BACKEND} backend does not support DDP communication hook on CUDA devices", + ) + @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"])) + def test_ddp_hook_parity_powerSGD(self): + for warm_start in [True, False]: + powersgd_state = powerSGD.PowerSGDState( + process_group=None, + matrix_approximation_rank=1, + start_powerSGD_iter=2, + warm_start=warm_start, + ) + self._test_ddp_hook_parity( + state=powersgd_state, hook=powerSGD.powerSGD_hook + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["cuda"], + f"The {BACKEND} backend does not support DDP communication hook on CUDA devices", + ) + @skip_but_pass_in_sandcastle_if( + NO_MULTIPROCESSING_SPAWN, + "Disabled for environments that \ + don't support multiprocessing with spawn start method", + ) + @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"])) + def test_ddp_hook_parity_post_localSGD(self): + # Although we start run local SGD at iteration 10, since we still use the global process group to run it, + # the post-LocalSGD actually still allreduces gradients globally for the remaining iterations. + state = post_localSGD.PostLocalSGDState( + process_group=None, subgroup=dist.group.WORLD, start_localSGD_iter=10 + ) + self._test_ddp_hook_parity( + state=state, hook=post_localSGD.post_localSGD_hook + ) + # Only validate the warmup iterations before local SGD is applied, + # because when `post_local_gradient_allreduce` is disabled, the gradients will not be synchronized at all. 
+ # Note that in practice a model averager has to be applied to run model averaging, + # so local gradient averaging is not necessary. + start_localSGD_iter = 10 + state = post_localSGD.PostLocalSGDState( + process_group=None, + subgroup=dist.group.WORLD, + start_localSGD_iter=start_localSGD_iter, + post_local_gradient_allreduce=False, + ) + self._test_ddp_hook_parity( + state=state, + hook=post_localSGD.post_localSGD_hook, + num_validated_iters=start_localSGD_iter, + ) + + # When `subgroup` is None, it is equivalent to the subgroup on the each node. + # For this single-node test environment, the intra-node process group is equivalent to + # the global process group. + if self.world_size == dist.get_world_size(): + state = post_localSGD.PostLocalSGDState( + process_group=None, subgroup=None, start_localSGD_iter=10 + ) + self._test_ddp_hook_parity( + state=state, hook=post_localSGD.post_localSGD_hook + ) + + # Since we start local SGD later than the total number of 100 iterations, + # no local SGD actually is executed, and we don't even need to provide a subgroup for this case. 
+ state = post_localSGD.PostLocalSGDState( + process_group=None, subgroup=None, start_localSGD_iter=1000 + ) + self._test_ddp_hook_parity( + state=state, hook=post_localSGD.post_localSGD_hook + ) + + def _prepare_single_device_module( + self, + rank, + process_group, + devices, + device_ids, + global_batch_size, + gradient_as_bucket_view=False, + ): + model = Net() + device = devices[0] if devices else torch.device("cuda:%d" % rank) + ddp_model = DistributedDataParallel( + copy.deepcopy(model).to(device), + device_ids=device_ids, + process_group=process_group, + bucket_cap_mb=0.001, + gradient_as_bucket_view=gradient_as_bucket_view, + ) + + model.to(device) + + input = torch.randn(global_batch_size, 2).to(device) + target = torch.randn(global_batch_size, 4).to(device) + + return model, ddp_model, input, target + + def _prepare_cpu_module( + self, + process_group, + global_batch_size, + gradient_as_bucket_view=False, + ): + model = Net() + ddp_model = DistributedDataParallel( + copy.deepcopy(model), + process_group=process_group, + bucket_cap_mb=0.001, + gradient_as_bucket_view=gradient_as_bucket_view, + ) + input = torch.randn(global_batch_size, 2) + target = torch.randn(global_batch_size, 4) + return model, ddp_model, input, target + + def _test_accumulate_gradients_no_sync( + self, num_iters=2, ddp_comm_hook=None, gradient_as_bucket_view=False + ): + """ + This is the recommended way to implement accumulate grads. + If ``ddp_comm_hook`` input was specified, it will also register that hook + to the ``ddp_model``. The hook fed into this function should not change + the resulting gradients. 
+ """ + group, group_id, rank = self._init_global_test() + world_size = get_world_size() + + # FIXME: Add testing for gloo/CUDA + if BACKEND == "mpi" or BACKEND == "gloo": + global_batch_size = world_size + local_batch_size = 1 + model, ddp_model, input, target = self._prepare_cpu_module( + group_id, global_batch_size, gradient_as_bucket_view + ) + + if BACKEND == "nccl": + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + int_devices = rank_to_GPU[rank][:1] + devices = [torch.device("cuda:" + str(i)) for i in int_devices] + global_batch_size = world_size + local_batch_size = len(devices) + model, ddp_model, input, target = self._prepare_single_device_module( + rank, + group_id, + devices, + devices, + global_batch_size, + gradient_as_bucket_view, + ) + + if ddp_comm_hook is not None: + ddp_model.register_comm_hook(group_id, ddp_comm_hook) + + def step_model(model, input, target): + model.train() + output = model(input) + loss = F.mse_loss(output, target.to(output.device)) + loss.backward() + + # ensure accumulate grads works with no_grad => no grads are accumulated. 
+ with torch.no_grad(): + with ddp_model.no_sync(): + ddp_model.train() + ddp_model(input) + + # check two model parameters over num_iters iterations + for iteration in range(num_iters): + step_model(model, input, target) + + ddp_input = input[ + rank * local_batch_size : (rank + 1) * local_batch_size + ] + ddp_target = target[ + rank * local_batch_size : (rank + 1) * local_batch_size + ] + + if iteration % 2 == 0: + # accumulate grads locally + with ddp_model.no_sync(): + step_model(ddp_model, ddp_input, ddp_target) + else: + # sync grads + step_model(ddp_model, ddp_input, ddp_target) + + for i, j in zip(model.parameters(), ddp_model.parameters()): + if not i.requires_grad: + continue + if iteration % 2 == 0: + self.assertNotEqual(i.grad, j.grad) + else: + self.assertEqual(i.grad, j.grad) + + # Shuffle the input so that DDP input is different + torch.manual_seed(1337 + iteration) + input = input[torch.randperm(global_batch_size)] + + @skip_but_pass_in_sandcastle_if( + BACKEND != "mpi" and BACKEND != "nccl" and BACKEND != "gloo", + "get_future is only supported on mpi, nccl and gloo", + ) + @nccl_skip_if_lt_x_gpu(BACKEND, 2) + def test_accumulate_gradients_no_sync(self): + """ + Runs _test_accumulate_gradients_no_sync using default inputs + """ + self._test_accumulate_gradients_no_sync() + + @skip_but_pass_in_sandcastle_if( + BACKEND != "mpi" and BACKEND != "nccl" and BACKEND != "gloo", + "get_future is only supported on mpi, nccl and gloo", + ) + @nccl_skip_if_lt_x_gpu(BACKEND, 2) + def test_accumulate_gradients_no_sync_grad_is_view(self): + """ + Runs _test_accumulate_gradients_no_sync using default inputs + """ + self._test_accumulate_gradients_no_sync(gradient_as_bucket_view=True) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "mpi" and BACKEND != "nccl" and BACKEND != "gloo", + "get_future is only supported on mpi, nccl and gloo", + ) + @nccl_skip_if_lt_x_gpu(BACKEND, 2) + def test_accumulate_gradients_no_sync_allreduce_hook(self): + """ + Runs multiple 
iterations on _test_accumulate_gradients_no_sync + using allreduce hook and validates whether future result was properly + passed as gradients in reducer. + """ + + world_size = get_world_size() + + def allreduce_hook( + group_id: object, bucket: dist.GradBucket + ) -> torch.futures.Future[torch.Tensor]: + tensors = [bucket.buffer() / world_size] + return ( + group_id.allreduce(tensors) + .get_future() + .then(lambda fut: fut.value()[0]) + ) + + self._test_accumulate_gradients_no_sync( + num_iters=4, ddp_comm_hook=allreduce_hook + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "mpi" and BACKEND != "nccl" and BACKEND != "gloo", + "get_future is only supported on mpi, nccl and gloo", + ) + @nccl_skip_if_lt_x_gpu(BACKEND, 2) + def test_accumulate_gradients_no_sync_allreduce_with_then_hook(self): + """ + Runs multiple iterations on _test_accumulate_gradients_no_sync using allreduce + hook that also uses then callbacks. In first then callback result is multiplied + by 2, and the second callback divides the result by 2 * world_size. It validates + whether final result was properly passed as gradients in reducer. + """ + + world_size = get_world_size() + + def allreduce_with_then_hook( + group_id: object, bucket: dist.GradBucket + ) -> torch.futures.Future[torch.Tensor]: + fut = group_id.allreduce([bucket.buffer()]).get_future() + + def mult(fut): + # Multiply the result by 2. + return 2 * fut.wait()[0] + + def div(fut): + # Divide the result by 2 * world_size. 
+ return fut.wait() / (2 * world_size) + + return fut.then(mult).then(div) + + self._test_accumulate_gradients_no_sync( + num_iters=4, ddp_comm_hook=allreduce_with_then_hook + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND != "mpi" and BACKEND != "nccl" and BACKEND != "gloo", + "get_future is only supported on mpi, nccl and gloo", + ) + @nccl_skip_if_lt_x_gpu(BACKEND, 2) + def test_get_future(self): + def mult(fut): + return [t * 3 for t in fut.wait()] + + def add(fut): + return [t + 1 for t in fut.wait()] + + group, group_id, rank = self._init_global_test() + input = _build_tensor(3, 2) + if BACKEND == "nccl": + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + device_id = rank_to_GPU[rank][0] + input = input.to(device_id) + fut = group_id.allreduce([input]).get_future() + res = fut.then(mult).then(add).wait() + expected = _build_tensor(3, 2 * len(group) * 3 + 1) + + self.assertEqual(res[0], expected) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_no_gpu + def test_DistributedDataParallel(self): + group, group_id, rank = self._init_global_test() + rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) + gpus = list(rank_to_GPU[rank]) + + for use_bucket_view, static_graph in itertools.product( + (False, True), (False, True) + ): + self._test_DistributedDataParallel( + gpu_subset=gpus, + rank=rank, + gradient_as_bucket_view=use_bucket_view, + static_graph=static_graph, + ) + + # test set static graph twice + self._test_DistributedDataParallel( + gpu_subset=gpus, + rank=rank, + gradient_as_bucket_view=use_bucket_view, + static_graph=static_graph, + set_static_graph_twice=True, + ) + + # test output_device + self._test_DistributedDataParallel( + gpu_subset=gpus, + rank=rank, + output_device=torch.device("cuda"), + gradient_as_bucket_view=use_bucket_view, + static_graph=static_graph, + ) + + # test device_ids 
+ gpus_list = [torch.device("cuda:" + str(i)) for i in gpus] + self._test_DistributedDataParallel( + gpu_subset=gpus_list, + rank=rank, + output_device=torch.device("cuda"), + gradient_as_bucket_view=use_bucket_view, + static_graph=static_graph, + ) + + def _test_DistributedDataParallel_with_amp(self, grad_is_view=False): + torch.manual_seed(31415) + # Creates model and optimizer in default precision + model = copy.deepcopy(DDP_NET).cuda() + optimizer = torch.optim.SGD(model.parameters(), lr=0.03) + + # Creates a GradScaler once at the beginning of training. + scaler = GradScaler() + + ddp_model = nn.parallel.DistributedDataParallel( + model, device_ids=[self.rank], gradient_as_bucket_view=grad_is_view + ) + + input = torch.randn(dist.get_world_size() * 2, 2).cuda() + target = torch.randn(dist.get_world_size() * 2, 4).cuda() + loss_fn = nn.MSELoss() + + # verify grads are none before training + for p in ddp_model.parameters(): + self.assertTrue(p is not None) + self.assertTrue(p.grad is None) + + for idx in range(20): + optimizer.zero_grad() + # Runs the forward pass with autocasting. + with autocast(): + output = ddp_model(input) + loss = loss_fn(output, target) + + # Scales loss. Calls backward() on scaled loss to create scaled gradients. + # Backward passes under autocast are not recommended. + # Backward ops run in the same dtype autocast chose for corresponding forward ops. + scaler.scale(loss).backward() + + # verify grads are not none and are valid during training + for p in ddp_model.parameters(): + if p.requires_grad: + self.assertTrue(p.grad is not None) + self.assertFalse(p.grad.isnan().any()) + self.assertFalse(p.grad.isinf().any()) + + # scaler.step() first unscales the gradients of the optimizer's assigned params. + # If these gradients do not contain infs or NaNs, optimizer.step() is then called, + # otherwise, optimizer.step() is skipped. + scaler.step(optimizer) + + # Updates the scale for next iteration. 
+ scaler.update() + + # Shuffle the input so that DDP input is different + torch.manual_seed(1337 + idx) + input = input[torch.randperm(dist.get_world_size() * 2)] + + return ddp_model + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_no_gpu + def test_DistributedDataParallel_with_amp_and_grad_is_view(self): + torch.cuda.set_device(self.rank) + ddp_model_grad_not_view = self._test_DistributedDataParallel_with_amp( + grad_is_view=False + ) + ddp_model_grad_is_view = self._test_DistributedDataParallel_with_amp( + grad_is_view=True + ) + for i, j in zip( + ddp_model_grad_not_view.parameters(), + ddp_model_grad_is_view.parameters(), + ): + self.assertEqual(i, j) + + def _test_DistributedDataParallel_SyncBatchNorm( + self, + gpu_subset, + rank, + local_bs, + global_bs, + offset, + output_device=None, + affine=True, + ): + # Run a simple end to end DDP model, use result of single node model + # as baseline + + # cpu training setup + model = BN_NET if affine else BN_NET_NO_AFFINE + + # single gpu training setup + model_gpu = copy.deepcopy(model) + model_gpu.cuda(gpu_subset[0]) + + # DDP training setup + model_DDP = nn.SyncBatchNorm.convert_sync_batchnorm(copy.deepcopy(model)) + model_DDP.cuda(gpu_subset[0]) + model_DDP = nn.parallel.DistributedDataParallel( + model_DDP, device_ids=gpu_subset + ) + + # test serializable/unserializable + with tempfile.NamedTemporaryFile() as tmp: + if sys.platform == "win32": + torch.save(model_DDP, tmp) + tmp.seek(0) + # weights_only=False as this is legacy code that saves the model + model_DDP = torch.load(tmp, weights_only=False) + else: + torch.save(model_DDP, tmp.name) + # weights_only=False as this is legacy code that saves the model + model_DDP = torch.load(tmp.name, weights_only=False) + + # data initialization + input_cpu = torch.randn(global_bs, 2) + target = torch.randn(global_bs, 4) + loss = nn.MSELoss() 
+ + # check two model parameters over 5 iterations + self._test_DDP_niter( + model_gpu, + model_DDP, + input_cpu.cuda(gpu_subset[0]), + target.cuda(gpu_subset[0]), + loss, + local_bs, + rank, + global_bs, + True, + offset, + dist.get_world_size(), + 5 if affine else 2, + ) + self._barrier() + + def _test_post_localSGD_optimizer_parity(self, create_averager, grad_is_view): + learning_rate = 0.03 + + net = torch.nn.parallel.DistributedDataParallel( + copy.deepcopy(DDP_NET).cuda(), + device_ids=[self.rank], + gradient_as_bucket_view=grad_is_view, + ) + averager = create_averager() + opt = torch.optim.SGD(net.parameters(), lr=learning_rate) + + net_using_post_localSGD_opt = torch.nn.parallel.DistributedDataParallel( + copy.deepcopy(DDP_NET).cuda(), + device_ids=[self.rank], + gradient_as_bucket_view=grad_is_view, + ) + # Process group cannot be pickled in some environments, + # so cannot deep copy an averager. See: + # https://github.com/pytorch/pytorch/pull/74737#pullrequestreview-922487496 + averager2 = create_averager() + post_localSGD_opt = self._create_post_localSGD_optimizer( + net_using_post_localSGD_opt, learning_rate, averager2 + ) + + input = torch.randn(dist.get_world_size() * 2, 2).cuda() + target = torch.randn(dist.get_world_size() * 2, 4).cuda() + loss_fn = nn.MSELoss() + + for _ in range(20): + self._perform_a_train_step(opt, net, loss_fn, input, target) + averager.average_parameters(net.parameters()) + + self._perform_a_train_step( + post_localSGD_opt, + net_using_post_localSGD_opt, + loss_fn, + input, + target, + ) + for p1, p2 in zip( + net.parameters(), net_using_post_localSGD_opt.parameters() + ): + self.assertEqual(p1.data, p2.data) + + # Also check if the built-in step counters are the same to prevent a bug like #74737. 
        # Also check if the built-in step counters are the same to prevent a
        # bug like #74737: both averagers must have taken identical steps.
        self.assertEqual(averager.step, averager2.step)

    def _create_periodic_model_averager(self):
        """Return a PeriodicModelAverager that averages every 4 steps after a 10-step warmup."""
        return averagers.PeriodicModelAverager(period=4, warmup_steps=10)

    def _create_post_localSGD_optimizer(self, net, learning_rate, averager):
        """Wrap a plain SGD optimizer over ``net``'s parameters in a PostLocalSGDOptimizer using ``averager``."""
        return post_localSGD_optimizer.PostLocalSGDOptimizer(
            optim=torch.optim.SGD(net.parameters(), lr=learning_rate),
            averager=averager,
        )

    def _perform_a_train_step(self, optimizer, net, loss_fn, input, target):
        """Run one standard zero_grad/forward/loss/backward/step iteration."""
        optimizer.zero_grad()
        output = net(input)
        loss = loss_fn(output, target)
        loss.backward()
        optimizer.step()

    def _test_post_localSGD_optimizer_step_reload(
        self, create_averager, chkpt_file
    ):
        # Train with a post-localSGD optimizer, checkpoint its state on rank
        # 0, then reload it into a second (dummy) optimizer and verify the
        # averager step counter round-trips through the state dict.
        learning_rate = 0.03

        net_using_post_localSGD_opt = torch.nn.parallel.DistributedDataParallel(
            copy.deepcopy(DDP_NET).cuda(), device_ids=[self.rank]
        )

        averager = create_averager()
        post_localSGD_opt = self._create_post_localSGD_optimizer(
            net_using_post_localSGD_opt, learning_rate, averager
        )

        # Separate averager for the reload target; process groups cannot be
        # pickled in some environments, so an averager cannot be deep-copied.
        averager2 = create_averager()
        dummy_post_localSGD_opt = self._create_post_localSGD_optimizer(
            net_using_post_localSGD_opt, learning_rate, averager2
        )

        input = torch.randn(dist.get_world_size() * 2, 2).cuda()
        target = torch.randn(dist.get_world_size() * 2, 4).cuda()
        loss_fn = nn.MSELoss()

        for _ in range(20):
            self._perform_a_train_step(
                post_localSGD_opt,
                net_using_post_localSGD_opt,
                loss_fn,
                input,
                target,
            )

        # Only rank 0 writes the checkpoint; all ranks wait at the barrier
        # below before reading it back.
        if self.rank == 0:
            torch.save(
                {"optimizer_state_dict": post_localSGD_opt.state_dict()}, chkpt_file
            )

        dist.barrier()
        # Remap tensors saved from cuda:0 onto this rank's device.
        map_location = {"cuda:%d" % 0: "cuda:%d" % self.rank}
        checkpoint = torch.load(chkpt_file, map_location=map_location)
        dummy_post_localSGD_opt.load_state_dict(checkpoint["optimizer_state_dict"])

        # Check that we didn't hit the trivial case
        self.assertNotEqual(averager2.step, 0)
        # Check if dummy averager was initialized to a correct value
        self.assertEqual(averager.step, averager2.step)

        # Remove 'step' entry from a
checkpoint. + # And make sure it is not in the state dictionary + del checkpoint["optimizer_state_dict"]["step"] + self.assertNotIn("step", checkpoint["optimizer_state_dict"]) + + # Check if checkpoint without a 'step' entry invokes a warning + with self.assertWarnsRegex( + expected_warning=UserWarning, + expected_regex="Loaded state dict does not contain a step counter for an averager. " + "Setting step counter to 0.", + ): + dummy_post_localSGD_opt.load_state_dict( + checkpoint["optimizer_state_dict"] + ) + + self.assertEqual(averager2.step, 0) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_post_localSGD_optimizer_parity(self): + torch.cuda.set_device(self.rank) + self._test_post_localSGD_optimizer_parity( + self._create_periodic_model_averager, + grad_is_view=False, + ) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_post_localSGD_optimizer_parity_grad_is_view(self): + torch.cuda.set_device(self.rank) + self._test_post_localSGD_optimizer_parity( + self._create_periodic_model_averager, + grad_is_view=True, + ) + + def _create_hierarchical_model_averager(self): + period_group_size_dict = OrderedDict([(2, 2), (4, dist.get_world_size())]) + return hierarchicalSGD.HierarchicalModelAverager( + period_group_size_dict=period_group_size_dict, warmup_steps=4 + ) + + @skip_if_lt_x_gpu(4) + @skip_if_odd_worldsize + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_post_localSGD_optimizer_parity_with_hierarchical_sgd(self): + torch.cuda.set_device(self.rank) + self._test_post_localSGD_optimizer_parity( + self._create_hierarchical_model_averager, + 
grad_is_view=False, + ) + + @skip_if_lt_x_gpu(4) + @skip_if_odd_worldsize + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_post_localSGD_optimizer_parity_with_hierarchical_sgd_grad_is_view( + self, + ): + torch.cuda.set_device(self.rank) + self._test_post_localSGD_optimizer_parity( + self._create_hierarchical_model_averager, + grad_is_view=True, + ) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_post_localSGD_optimizer_step_reload(self): + torch.cuda.set_device(self.rank) + with _rank_temp_file() as tmp_file: + self._test_post_localSGD_optimizer_step_reload( + self._create_periodic_model_averager, tmp_file + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_no_gpu + def test_DistributedDataParallel_SyncBatchNorm_Channels_Last(self): + self._test_DistributedDataParallel_SyncBatchNorm_with_memory_format( + torch.channels_last + ) + self._test_DistributedDataParallel_SyncBatchNorm_with_memory_format( + torch.channels_last_3d + ) + + def _test_DistributedDataParallel_SyncBatchNorm_with_memory_format( + self, memory_format + ): + group, group_id, rank = self._init_global_test() + num_processes = dist.get_world_size() + local_bs = 2 + bs_offset = int(rank * 2) + global_bs = int(num_processes * 2) + + model = ONLY_SBN_NET + model_gpu = copy.deepcopy(model).cuda(rank) + model_DDP = nn.parallel.DistributedDataParallel( + model_gpu, device_ids=[rank] + ) + + shapes = [global_bs, 2, 4, 4] + ( + [] if memory_format is torch.channels_last else [4] + ) + + input_gpu = ( + torch.randn(*shapes, dtype=torch.float) + .cuda(rank) + .to(memory_format=memory_format) + ) + target_gpu = ( + 
torch.randn(*shapes, dtype=torch.float) + .cuda(rank) + .to(memory_format=memory_format) + ) + loss = nn.MSELoss() + + # check two model parameters over 5 iterations + self._test_DDP_niter( + model_gpu, + model_DDP, + input_gpu, + target_gpu, + loss, + local_bs, + rank, + global_bs, + True, + bs_offset, + dist.get_world_size(), + memory_format=memory_format, + ) + self._barrier() + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_no_gpu + def test_DistributedDataParallel_SyncBatchNorm(self): + group, group_id, rank = self._init_global_test() + world_size = dist.get_world_size() + # DDP does not support replicating BN layers within a process, hence + # testing with one module replica per process + gpus = [rank] + + local_bs = 2 + bs_offset = int(rank * 2) + global_bs = int(world_size * 2) + + self._test_DistributedDataParallel_SyncBatchNorm( + gpu_subset=gpus, + rank=rank, + local_bs=local_bs, + global_bs=global_bs, + offset=bs_offset, + ) + + # test output_device + self._test_DistributedDataParallel_SyncBatchNorm( + gpu_subset=gpus, + rank=rank, + local_bs=local_bs, + global_bs=global_bs, + offset=bs_offset, + output_device=torch.device("cuda"), + ) + + # test device_ids + gpus = [torch.device("cuda:" + str(i)) for i in gpus] + self._test_DistributedDataParallel_SyncBatchNorm( + gpu_subset=gpus, + rank=rank, + local_bs=local_bs, + global_bs=global_bs, + offset=bs_offset, + output_device=torch.device("cuda"), + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_no_gpu + def test_DistributedDataParallel_SyncBatchNorm_No_Affine(self): + group, group_id, rank = self._init_global_test() + world_size = dist.get_world_size() + # DDP does not support replicating BN layers within a process, hence + # testing with one module 
replica per process + gpus = [rank] + + local_bs = 2 + bs_offset = int(rank * 2) + global_bs = int(world_size * 2) + + self._test_DistributedDataParallel_SyncBatchNorm( + gpu_subset=gpus, + rank=rank, + local_bs=local_bs, + global_bs=global_bs, + offset=bs_offset, + affine=False, + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_no_gpu + def test_DistributedDataParallel_SyncBatchNorm_2D_Input(self): + group, group_id, rank = self._init_global_test() + # DDP does not support replicating BN layers within a process, hence + # testing with one module replica per process + gpus = [rank] + + model = nn.BatchNorm1d(2) + + # single gpu training setup + model_gpu = copy.deepcopy(model) + model_gpu.cuda(gpus[0]) + + # DDP training setup + model_DDP = nn.SyncBatchNorm.convert_sync_batchnorm(copy.deepcopy(model)) + model_DDP.cuda(gpus[0]) + model_DDP = nn.parallel.DistributedDataParallel(model_DDP, device_ids=gpus) + + local_bs = len(gpus) * 2 + global_bs = dist.get_world_size() * local_bs + input_cpu = torch.randn(global_bs, 2) + target = torch.randn(global_bs, 2) + loss = nn.MSELoss() + + # disabling cudnn. + # SyncBatchNorm goes through native_batch_norm kernel, this avoids the + # numerical issue created by the divergent code path. 
+ with torch.backends.cudnn.flags(False): + # check two model parameters over 5 iterations + self._test_DDP_niter( + model_gpu, + model_DDP, + input_cpu.cuda(gpus[0]), + target.cuda(gpus[0]), + loss, + local_bs, + rank, + global_bs, + True, + ) + self._barrier() + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_no_gpu + @require_world_size(2) + def test_DistributedDataParallel_SyncBatchNorm_Single_Input_Per_Process(self): + group, group_id, rank = self._init_global_test() + # DDP does not support replicating BN layers within a process, hence + # testing with one module replica per process + gpus = [rank] + + model = nn.BatchNorm1d(2) + + # single gpu training setup + model_gpu = copy.deepcopy(model) + model_gpu.cuda(gpus[0]) + + # DDP training setup + model_DDP = nn.SyncBatchNorm.convert_sync_batchnorm(copy.deepcopy(model)) + model_DDP.cuda(gpus[0]) + model_DDP = nn.parallel.DistributedDataParallel(model_DDP, device_ids=gpus) + + local_bs = 1 + global_bs = dist.get_world_size() + input_cpu = torch.randn(global_bs, 2) + target = torch.randn(global_bs, 2) + loss = nn.MSELoss() + + # disabling cudnn. + # SyncBatchNorm goes through native_batch_norm kernel, this avoids the + # numerical issue created by the divergent code path. 
+ with torch.backends.cudnn.flags(False): + # check two model parameters over 5 iterations + self._test_DDP_niter( + model_gpu, + model_DDP, + input_cpu.cuda(gpus[0]), + target.cuda(gpus[0]), + loss, + local_bs, + rank, + global_bs, + True, + ) + self._barrier() + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_no_gpu + def test_DistributedDataParallel_SyncBatchNorm_Diff_Input_Sizes_Running_Value( + self, + ): + group, group_id, rank = self._init_global_test() + model = nn.parallel.DistributedDataParallel( + ONLY_SBN_NET.cuda(rank), device_ids=[rank] + ) + + input_var = [] + for i in range(dist.get_world_size()): + input_var_rank = torch.cat( + [ + torch.ones(2, 1, 10 ** (i + 1)) * (0.1 ** (i - 1)), + torch.ones(2, 1, 10 ** (i + 1)) * (0.3 ** (i - 1)), + ], + dim=1, + ) + input_var.append(input_var_rank) + + all_input_var = torch.cat( + [ + x.permute(1, 0, 2).contiguous().view(ONLY_SBN_NET.num_features, -1) + for x in input_var + ], + dim=1, + ).cuda(rank) + + for i in range(100): + y = model(input_var[rank].cuda(rank)) + y.mean().backward() + + running_mean, running_var = ( + model.module.running_mean, + model.module.running_var, + ) + torch.testing.assert_close(running_mean, all_input_var.mean(1)) + torch.testing.assert_close(running_var, all_input_var.var(1)) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_no_gpu + def test_DistributedDataParallel_SyncBatchNorm_Diff_Input_Sizes_gradient(self): + group, group_id, rank = self._init_global_test() + # only do single GPU per process + gpus = [rank] + + # cpu training setup + model = BN_NET + + num_processes = dist.get_world_size() + local_bs = rank + 2 + bs_offset = int((rank + 3) * rank / 2) + global_bs = int((num_processes + 3) * num_processes / 2) + + 
self._test_DistributedDataParallel_SyncBatchNorm( + gpu_subset=gpus, + rank=rank, + local_bs=local_bs, + global_bs=global_bs, + offset=bs_offset, + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_no_gpu + def test_DistributedDataParallel_SyncBatchNorm_half(self): + group, group_id, rank = self._init_global_test() + + model = copy.deepcopy(BN_NET) + model = model.half() + model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model) + model = nn.parallel.DistributedDataParallel(model.cuda(rank), device_ids=[rank]) + inp = torch.randn(2, 2, dtype=torch.float16, device=torch.device(rank)) + # Check that forward/backward do not error with dtype mismatch + out = model(inp) + self.assertEqual(out.dtype, torch.float16) + out.sum().backward() + for param in model.parameters(): + self.assertEqual(param.grad.dtype, torch.float16) + + def _test_ddp_logging_data(self, is_gpu): + rank = dist.get_rank() + model_DDP = copy.deepcopy(DDP_NET) + if is_gpu: + model_DDP = nn.parallel.DistributedDataParallel( + model_DDP.cuda(rank), device_ids=[rank] + ) + else: + model_DDP = nn.parallel.DistributedDataParallel(model_DDP) + + # dummy data initialization + local_bs = 2 + batch_size, input, target, loss = self._prepare_dummy_data(local_bs) + if is_gpu: + input = input.cuda(rank) + target = target.cuda(rank) + + model_DDP._set_ddp_runtime_logging_sample_rate(2) + + for idx in range(20): + offset = rank * local_bs + + # DDP training, DDP scatters subsets of input to nodes/GPUs + self._test_DDP_helper( + model_DDP, + input[offset : offset + local_bs], + target[offset : offset + local_bs], + loss, + 1, + ) + + self._model_step_with_zero_grad(model_DDP) + + # Verify DDP logging data is sampled as expected + # If it has ran more than 10 iterations and this is + # the sampled iteration for measuring run time stats, + # the run time stats for this idx-th iteration will 
not + # be zeros. + ddp_logging_data = model_DDP._get_ddp_logging_data() + if idx > 0 and (idx < 10 or idx % 2 == 0): + self.assertGreaterEqual( + ddp_logging_data.get("forward_compute_time"), 1 + ) + self.assertGreaterEqual( + ddp_logging_data.get("backward_compute_time"), 1 + ) + self.assertGreaterEqual( + ddp_logging_data.get("backward_comm_time"), 1 + ) + self.assertGreaterEqual( + ddp_logging_data.get("backward_compute_time"), + ddp_logging_data.get("backward_compute_comm_overlap_time"), + ) + self.assertGreaterEqual( + ddp_logging_data.get("backward_comm_time"), + ddp_logging_data.get("backward_compute_comm_overlap_time"), + ) + self.assertEqual(ddp_logging_data.get("iteration"), idx) + elif idx > 0: + # if the idx-th iteration is not sampled to set runtime stats, + # ddp_logging_data.iteration will not be updated to current + # iteration. + self.assertNotEqual(ddp_logging_data.get("iteration"), idx) + + # Shuffle the input so that DDP input is different + input = input[torch.randperm(batch_size)] + + return model_DDP + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "nccl does not support DDP on CPU models" + ) + def test_ddp_logging_data_cpu(self): + def parse_env(var): + return os.environ[var] if var in os.environ else "N/A" + + dist.set_debug_level(dist.DebugLevel.INFO) + group, group_id, rank = self._init_global_test() + model_DDP = self._test_ddp_logging_data(is_gpu=False) + + ddp_logging_data = model_DDP._get_ddp_logging_data() + self.assertEqual(ddp_logging_data.get("world_size"), dist.get_world_size()) + self.assertEqual(ddp_logging_data.get("rank"), dist.get_rank()) + self.assertEqual(ddp_logging_data.get("module_name"), "Net") + self.assertEqual(ddp_logging_data.get("device_ids"), "") + # output_device is -1 in default if it is not set, e.g. + # output_device of CPU training is -1. 
+ self.assertEqual(ddp_logging_data.get("output_device"), -1) + self.assertEqual(ddp_logging_data.get("broadcast_buffers"), 1) + self.assertEqual(ddp_logging_data.get("bucket_cap_bytes"), 25 * 1024 * 1024) + self.assertEqual(ddp_logging_data.get("find_unused_parameters"), 0) + self.assertEqual(ddp_logging_data.get("gradient_as_bucket_view"), 0) + self.assertEqual( + ddp_logging_data.get("backend_name"), dist.get_backend(group_id) + ) + self.assertEqual(ddp_logging_data.get("iteration"), 18) + params = list(model_DDP.parameters()) + num_params = 0 + param_size = 0 + params = list(filter(lambda parameter: parameter.requires_grad, params)) + for p in params: + num_params += 1 + param_size += p.numel() * p.element_size() + self.assertEqual(ddp_logging_data.get("dtypes"), "float") + self.assertEqual( + ddp_logging_data.get("total_parameter_size_bytes"), param_size + ) + self.assertEqual(ddp_logging_data.get("num_parameter_tensors"), num_params) + self.assertEqual(ddp_logging_data.get("bucket_sizes"), str(param_size)) + self.assertEqual( + ddp_logging_data.get("master_port"), parse_env("MASTER_PORT") + ) + self.assertEqual( + ddp_logging_data.get("master_addr"), parse_env("MASTER_ADDR") + ) + self.assertEqual( + ddp_logging_data.get("torch_distributed_debug"), + parse_env("TORCH_DISTRIBUTED_DEBUG"), + ) + self.assertEqual( + ddp_logging_data.get("cuda_visible_devices"), + parse_env("CUDA_VISIBLE_DEVICES"), + ) + if ddp_logging_data.get("backend_name") == "gloo": + self.assertEqual( + ddp_logging_data.get("gloo_socket_ifname"), + parse_env("GLOO_SOCKET_IFNAME"), + ) + self.assertEqual( + ddp_logging_data.get("gloo_device_transport"), + parse_env("GLOO_DEVICE_TRANSPORT"), + ) + default_gloo_threads = 2 + self.assertEqual( + ddp_logging_data.get("gloo_num_threads"), + default_gloo_threads, + ) + + self.assertEqual(ddp_logging_data.get("nccl_socket_ifname"), None) + self.assertEqual(ddp_logging_data.get("nccl_blocking_wait"), None) + 
self.assertEqual(ddp_logging_data.get("nccl_async_error_handling"), None) + self.assertEqual(ddp_logging_data.get("nccl_debug"), None) + self.assertEqual(ddp_logging_data.get("nccl_nthreads"), None) + self.assertEqual(ddp_logging_data.get("nccl_ib_timeout"), None) + # test runtime logging fields + # Note: DETAIL debug mode logs DDP logging data to stdout and + # thus accesses std::map, which fills in a default value for the + # type if it didn't exist. + self.assertEqual(ddp_logging_data.get("unused_parameter_size", 0), 0) + self.assertEqual(ddp_logging_data.get("has_rebuilt_buckets"), 1) + self.assertEqual( + ddp_logging_data.get("rebuilt_bucket_sizes"), str(param_size) + ) + grad_ready_order = ddp_logging_data.get( + "prev_iteration_grad_ready_order_indices" + ) + expected_order = list(reversed([str(x) for x in range(3)])) + self.assertEqual(grad_ready_order, ", ".join(expected_order)) + bucket_indices = ddp_logging_data.get("rebuilt_per_bucket_param_indices") + self.assertEqual(bucket_indices, " ".join(expected_order)) + # It is hard to test accurate latency, but it can test whether the latency is + # a valid value and in the expected range. 
+ self.assertGreaterEqual(ddp_logging_data.get("avg_forward_compute_time"), 1) + self.assertGreaterEqual( + ddp_logging_data.get("avg_backward_compute_time"), 1 + ) + self.assertGreaterEqual(ddp_logging_data.get("avg_backward_comm_time"), 1) + self.assertGreaterEqual( + ddp_logging_data.get("avg_backward_compute_time"), + ddp_logging_data.get("avg_backward_compute_comm_overlap_time"), + ) + self.assertGreaterEqual( + ddp_logging_data.get("avg_backward_comm_time"), + ddp_logging_data.get("avg_backward_compute_comm_overlap_time"), + ) + # Test host-side times are roughly in the order that we expect + fwd_host_side_time = ddp_logging_data.get("forward_compute_time_start") + bwd_comp_start_host_side_time = ddp_logging_data.get( + "backward_compute_time_start" + ) + bwd_comp_end_host_side_time = ddp_logging_data.get( + "backward_compute_time_end" + ) + bwd_comm_start_host_side_time = ddp_logging_data.get( + "backward_comm_time_start" + ) + bwd_comm_end_host_side_time = ddp_logging_data.get("backward_comm_time_end") + self.assertGreaterEqual( + bwd_comm_end_host_side_time, bwd_comm_start_host_side_time + ) + self.assertGreaterEqual( + bwd_comm_start_host_side_time, bwd_comp_start_host_side_time + ) + self.assertGreaterEqual( + bwd_comp_end_host_side_time, bwd_comp_start_host_side_time + ) + self.assertGreaterEqual(bwd_comp_start_host_side_time, fwd_host_side_time) + + # test larger net with mixed data types, verify multiple bucket sizes + model = LargeNet() + model.float() + model.fc1.double() + model_DDP = nn.parallel.DistributedDataParallel(model, bucket_cap_mb=1.5) + ddp_logging_data = model_DDP._get_ddp_logging_data() + params = list(model_DDP.parameters()) + self.assertEqual( + ddp_logging_data.get("bucket_cap_bytes"), int(1.5 * 1024 * 1024) + ) + bucket_sizes = [ + params[1].numel() * params[1].element_size(), + params[0].numel() * params[0].element_size(), + ] + self.assertEqual( + ddp_logging_data.get("bucket_sizes"), + ", ".join(str(x) for x in bucket_sizes), + 
) + self.assertEqual(ddp_logging_data.get("dtypes"), "double, float") + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_no_gpu + def test_ddp_logging_data_gpu(self): + group, group_id, rank = self._init_global_test() + model_DDP = self._test_ddp_logging_data(is_gpu=True) + ddp_logging_data = model_DDP._get_ddp_logging_data() + self.assertEqual(ddp_logging_data.get("device_ids"), str(rank)) + self.assertEqual(ddp_logging_data.get("output_device"), rank) + grad_ready_order = ddp_logging_data.get( + "prev_iteration_grad_ready_order_indices" + ) + expected_order = list(reversed([str(x) for x in range(3)])) + self.assertEqual(grad_ready_order, ", ".join(expected_order)) + bucket_indices = ddp_logging_data.get("rebuilt_per_bucket_param_indices") + self.assertEqual(bucket_indices, " ".join(expected_order)) + # test runtime logging fields + # It is hard to test accurate latency, but it can test whether the latency is + # a valid value and in the expected range. 
+ self.assertGreaterEqual(ddp_logging_data.get("avg_forward_compute_time"), 1) + self.assertGreaterEqual( + ddp_logging_data.get("avg_backward_compute_comm_overlap_time"), 1 + ) + self.assertGreaterEqual( + ddp_logging_data.get("avg_backward_compute_time"), + ddp_logging_data.get("avg_backward_compute_comm_overlap_time"), + ) + self.assertGreaterEqual( + ddp_logging_data.get("avg_backward_comm_time"), + ddp_logging_data.get("avg_backward_compute_comm_overlap_time"), + ) + # Test host-side times are roughly in the order that we expect + fwd_host_side_time = ddp_logging_data.get("forward_compute_time_start") + bwd_comp_start_host_side_time = ddp_logging_data.get( + "backward_compute_time_start" + ) + bwd_comp_end_host_side_time = ddp_logging_data.get( + "backward_compute_time_end" + ) + bwd_comm_start_host_side_time = ddp_logging_data.get( + "backward_comm_time_start" + ) + bwd_comm_end_host_side_time = ddp_logging_data.get("backward_comm_time_end") + self.assertGreaterEqual( + bwd_comm_end_host_side_time, bwd_comm_start_host_side_time + ) + self.assertGreaterEqual( + bwd_comm_start_host_side_time, bwd_comp_start_host_side_time + ) + self.assertGreaterEqual( + bwd_comp_end_host_side_time, bwd_comp_start_host_side_time + ) + self.assertGreaterEqual(bwd_comp_start_host_side_time, fwd_host_side_time) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "nccl", "nccl does not support DDP on CPU models" + ) + def test_static_graph_api_cpu(self): + model_DDP = nn.parallel.DistributedDataParallel(DDP_NET) + expected_err = "should be called before training loop starts" + with self.assertRaisesRegex(RuntimeError, expected_err): + local_bs = 2 + batch_size, input, target, loss = self._prepare_dummy_data(local_bs) + offset = dist.get_rank() * local_bs + + # DDP training, DDP scatters subsets of input to nodes/GPUs + self._test_DDP_helper( + model_DDP, + input[offset : offset + local_bs], + target[offset : offset + local_bs], + loss, + 1, + ) + model_DDP._set_static_graph() + + # 
Verify error was logged in ddp_logging_data. + verify_ddp_error_logged(model_DDP, expected_err) + + @skipIfNoTorchVision + def test_SyncBatchNorm_process_group(self): + # When adopting `convert_sync_batchnorm` to convert a `nn.modules`, + # it need to recursively pass the `process_group` in the module when the `SyncBatchNorm` + # is nested in a sub-module or sub-sub-module (e.g. resnet50 in torchvision.models). + + process_ids = 0 + process_group = torch.distributed.new_group([process_ids]) + res50_model = torchvision.models.resnet50() + res50_model_sync = nn.SyncBatchNorm.convert_sync_batchnorm( + copy.deepcopy(res50_model), process_group + ) + process_group_sync = res50_model_sync.layer1[0].bn1.process_group + self.assertEqual(process_group_sync, process_group) + + def _run_reduction_test( + self, tensor, expected_tensor, op, reduction_fn=dist.all_reduce, dst=None + ): + if reduction_fn != dist.all_reduce and dst is None: + raise ValueError(f"Reduction fn {reduction_fn} must specify dst!") + if dst is not None: + reduction_fn(tensor, dst, op) + # Only destination rank tensor is expected to have final result. + if dist.get_rank() == dst: + self.assertEqual(tensor, expected_tensor) + else: + reduction_fn(tensor, op) + self.assertEqual(tensor, expected_tensor) + + @require_backend_is_available({"nccl"}) + @skip_if_lt_x_gpu(2) + def test_nccl_backend_bool_allreduce(self): + torch.cuda.set_device(self.rank) + # Run all_reduce with PRODUCT + element = self.rank % 2 == 0 + for op in [dist.ReduceOp.PRODUCT, dist.ReduceOp.MIN]: + input_tensor = torch.tensor([element, element]).to(self.rank) + self._run_reduction_test( + input_tensor, torch.tensor([False, False]).to(self.rank), op + ) + # Ensure that all ranks contributing True (cast to 1) results in the + # correct reduction. 
+ input_tensor = torch.tensor([True, True]).to(self.rank) + expected_tensor = input_tensor.clone() + self._run_reduction_test(input_tensor, expected_tensor, op) + + # Run all_reduce with SUM + for op in [dist.ReduceOp.SUM, dist.ReduceOp.MAX]: + input_tensor = torch.tensor([element, element]).to(self.rank) + self._run_reduction_test( + input_tensor, torch.tensor([True, True]).to(self.rank), op + ) + # TODO: NCCL backend does not work correctly for bitwise reduction ops + # (see https://github.com/pytorch/pytorch/issues/41362). Add tests for + # these once it is supported. + + @require_backend_is_available({"nccl"}) + @skip_if_lt_x_gpu(2) + def test_nccl_backend_bool_allgather(self): + torch.cuda.set_device(self.rank) + inp = {0: [True, True], 1: [False, True]} + input_tensor = torch.tensor(inp[self.rank % 2]).to(self.rank) + # Preserve a copy of the tensor to compare against after allgather. + input_tensor_copy = input_tensor.clone() + tensor_list = [ + torch.tensor([False, False]).to(self.rank) + for _ in range(dist.get_world_size()) + ] + dist.all_gather(tensor_list, input_tensor) + + self.assertEqual(len(tensor_list), dist.get_world_size()) + for i, t in enumerate(tensor_list): + expected = torch.tensor(inp[i % 2]).to(self.rank) + self.assertEqual(t, expected) + # Ensure that the input tensor is not modified, since this collective + # does not modify its input. 
+ self.assertEqual(input_tensor_copy, input_tensor) + + @require_backend_is_available({"nccl"}) + @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"])) + def test_nccl_backend_bool_reduce(self): + torch.cuda.set_device(self.rank) + inp = {0: [True, True], 1: [False, False]} + # Run reduce() with product op + for op in [dist.ReduceOp.PRODUCT, dist.ReduceOp.MIN]: + input_tensor = torch.tensor(inp[self.rank % 2]).to(self.rank) + expected = torch.tensor([False, False]).to(self.rank) + self._run_reduction_test(input_tensor, expected, op, dist.reduce, dst=0) + # Ensure that all ranks contributing True (cast to 1) results in the + # correct reduction. + input_tensor = torch.tensor([True, True]).to(self.rank) + expected_tensor = input_tensor.clone() + self._run_reduction_test( + input_tensor, expected_tensor, op, dist.reduce, dst=0 + ) + + for op in [dist.ReduceOp.SUM, dist.ReduceOp.MAX]: + input_tensor = torch.tensor(inp[self.rank % 2]).to(self.rank) + expected = ( + torch.tensor([True, True]).to(self.rank) + if self.rank == 0 + else input_tensor.clone() + ) + self._run_reduction_test(input_tensor, expected, op, dist.reduce, dst=0) + + @require_backend_is_available({"nccl"}) + @skip_if_lt_x_gpu(2) + def test_nccl_backend_bool_broadcast(self): + tensor_size = 10 + bcast_tensor = torch.tensor( + [ + (random.random() < 0.5 if self.rank == 0 else False) + for _ in range(tensor_size) + ] + ).to(self.rank) + dist.broadcast(bcast_tensor, src=0) + # Now allgather and ensure the tensors are equal. 
+ tensor_list = [ + torch.tensor([False for _ in range(tensor_size)]).to(self.rank) + for _ in range(dist.get_world_size()) + ] + dist.all_gather(tensor_list, bcast_tensor) + expected = tensor_list[0] + for tensor in tensor_list[1:]: + self.assertEqual(tensor, expected) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"])) + def test_DistributedSampler_padding(self): + # Tests padding of distributed sampler. + world_size = dist.get_world_size() + + # Simulates the 'casual' dataset size + dataset_size = 100 + world_size + 1 + dataset = [torch.ones(1).to(self.rank) * i for i in range(dataset_size)] + + # Simulates the 'tiny' dataset size + dataset_tiny_size = max(world_size // 2 - 1, 1) + dataset_tiny = [ + torch.ones(1).to(self.rank) * i for i in range(dataset_tiny_size) + ] + + # Specifying drop_last=True will cause the tail of the data to be dropped. + dist_sampler = DistributedSampler(dataset=dataset, drop_last=True) + local_num_samples, local_dataset_size = ( + dist_sampler.num_samples, + dist_sampler.total_size, + ) + # The effective dataset size should be the greatest integer that is <= + # dataset_size that is divisible by the world_size. This is to ensure each + # rank processes the same number of samples. + effective_dataset_size = ( + math.ceil((dataset_size - world_size) / world_size) + if dataset_size % world_size != 0 + else dataset_size / world_size + ) + self.assertEqual(local_num_samples, effective_dataset_size) + self.assertEqual(local_dataset_size, local_num_samples * world_size) + indices_list = list(iter(dist_sampler)) + self.assertEqual(len(indices_list), local_num_samples) + + def validate_global_samples(local_num_samples): + # Ensure that each rank processes the same number of samples. 
+ world_samples = [ + torch.LongTensor([0]).to(self.rank) for _ in range(world_size) + ] + dist.all_gather( + world_samples, torch.tensor([local_num_samples]).to(self.rank) + ) + world_samples = [sample.item() for sample in world_samples] + self.assertEqual(len(set(world_samples)), 1) + + validate_global_samples(local_num_samples) + + # drop_last=False is the default and will add additional indices to be sampled, + # increasing the effective dataset size. + dist_sampler_added_samples = DistributedSampler(dataset=dataset) + local_num_samples, local_dataset_size = ( + dist_sampler_added_samples.num_samples, + dist_sampler_added_samples.total_size, + ) + # The effective dataset size is the smallest integer that is >= dataset_size + # and divisible by the world size. + self.assertEqual(local_num_samples, math.ceil(dataset_size / world_size)) + self.assertEqual(local_dataset_size, local_num_samples * world_size) + indices_list = list(iter(dist_sampler_added_samples)) + self.assertEqual(len(indices_list), local_num_samples) + + # Ensure that each rank processes the same number of samples. + validate_global_samples(local_num_samples) + + # Ensure additional samples are padded even when + # the extremely small dataset is given. + dist_sampler_added_samples_tiny = DistributedSampler(dataset=dataset_tiny) + local_num_samples, local_dataset_size = ( + dist_sampler_added_samples_tiny.num_samples, + dist_sampler_added_samples_tiny.total_size, + ) + self.assertEqual( + local_num_samples, math.ceil(dataset_tiny_size / world_size) + ) + self.assertEqual(local_dataset_size, local_num_samples * world_size) + indices_list = list(iter(dist_sampler_added_samples_tiny)) + self.assertEqual(len(indices_list), local_num_samples) + validate_global_samples(local_num_samples) + + def _test_allgather_object(self, subgroup=None): + # Only set device for NCCL backend since it must use GPUs. 
+ + gather_objects = COLLECTIVES_OBJECT_TEST_LIST.copy() + + backend = os.environ["BACKEND"] + if backend == "nccl": + # Case where rank != GPU device. + next_rank = (self.rank + 1) % int(self.world_size) + torch.cuda.set_device(next_rank) + + # If GPU test, add object with GPU tensor + if backend == "nccl": + gather_objects.append(Foo(torch.randn(3, 3, device=0))) + + output_gathered = [None for _ in range(dist.get_world_size())] + dist.all_gather_object( + output_gathered, + gather_objects[self.rank % len(gather_objects)], + group=subgroup, + ) + + for i, val in enumerate(output_gathered): + expected = gather_objects[i % len(gather_objects)] + self.assertEqual(val, expected) + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @require_n_gpus_for_nccl_backend( + int(os.environ["WORLD_SIZE"]), os.environ["BACKEND"] + ) + @with_dist_debug_levels(levels=["OFF", "INFO", "DETAIL"]) + def test_all_gather_object_default_pg(self): + return self._test_allgather_object() + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @require_n_gpus_for_nccl_backend( + int(os.environ["WORLD_SIZE"]), os.environ["BACKEND"] + ) + @with_dist_debug_levels(levels=["DETAIL", "OFF", "INFO"]) + def test_all_gather_object_subgroup(self): + default = _get_default_group() + backend = dist.get_backend(default) + subgroup = dist.new_group(backend=backend) + return self._test_allgather_object(subgroup=subgroup) + + def _test_gather_object(self, pg=None): + # Ensure stateful objects can be gathered + gather_objects = COLLECTIVES_OBJECT_TEST_LIST.copy() + my_rank = dist.get_rank(pg) + + backend = os.environ["BACKEND"] + if backend == "nccl": + # Case where rank != GPU device. 
+ next_rank = (self.rank + 1) % int(self.world_size) + torch.cuda.set_device(next_rank) + + # If GPU test, add object with GPU tensor + if backend == "nccl": + gather_objects.append(Foo(torch.randn(3, 3, device=my_rank))) + + output_gathered = [None for _ in range(dist.get_world_size(pg))] + gather_on_rank = 0 + dist.gather_object( + gather_objects[self.rank % len(gather_objects)], + object_gather_list=output_gathered + if my_rank == gather_on_rank + else None, + dst=gather_on_rank, + group=pg, + ) + if my_rank != gather_on_rank: + self.assertEqual( + output_gathered, [None for _ in range(dist.get_world_size())] + ) + else: + for i, val in enumerate(output_gathered): + expected = gather_objects[i % len(gather_objects)] + self.assertEqual(val, expected) + + # Validate errors when objects can't be pickled. + class Bar: + pass + + b = Bar() + gather_objects = [b for _ in range(dist.get_world_size())] + with self.assertRaisesRegex(AttributeError, "Can't pickle local object"): + dist.all_gather_object( + [None for _ in range(dist.get_world_size())], + gather_objects[self.rank], + group=pg, + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND == "ucc", "CPU tensor ops not supported by UCP TL" + ) + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @with_dist_debug_levels(levels=["DETAIL", "OFF", "INFO"]) + def test_gather_object(self): + return self._test_gather_object() + + @skip_but_pass_in_sandcastle_if( + BACKEND == "ucc", "CPU tensor ops not supported by UCP TL" + ) + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @with_dist_debug_levels(levels=["DETAIL", "OFF", "INFO"]) + def test_gather_object_subgroup(self): + default = _get_default_group() + backend = dist.get_backend(default) + subgroup = dist.new_group(backend=backend) + return self._test_gather_object(subgroup) + + def validate_net_equivalence(self, net): + # Helper to validate synchronization of nets across ranks. 
+ net_module_states = list(net.module.state_dict().values()) + # Check that all tensors in module's state_dict() are equal. + for t in net_module_states: + tensor_list = [ + torch.zeros_like(t) for _ in range(dist.get_world_size()) + ] + dist.all_gather(tensor_list, t) + for tensor in tensor_list: + self.assertEqual(tensor, t) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_sync_module_states(self): + # Test that after calling _sync_module_states, models across ranks + # are the same and are equal to the model on the input rank. + dim = 2 + rank = self.rank + rank_to_broadcast = 1 + # Seed to ensure that ranks are initialized with different initial models. + torch.manual_seed(rank) + model = nn.Linear(dim, dim, bias=False) + net = torch.nn.parallel.DistributedDataParallel( + model.cuda(rank), device_ids=[self.rank], bucket_cap_mb=1 + ) + new_model = nn.Linear(dim, dim, bias=False).cuda(rank) + net.module = copy.deepcopy(new_model) + # Assert params are different + net_module_states = list(net.module.state_dict().values()) + for t in net_module_states: + tensor_list = [ + torch.zeros_like(t) for _ in range(dist.get_world_size()) + ] + dist.all_gather(tensor_list, t) + for i, tensor in enumerate(tensor_list): + if i == rank: + self.assertEqual(t, tensor) + else: + # tensor from another rank should be different. + self.assertNotEqual(t, tensor) + + _sync_module_states( + module=net.module, + process_group=net.process_group, + broadcast_bucket_size=net.broadcast_bucket_size, + src=rank_to_broadcast, + params_and_buffers_to_ignore=net.parameters_to_ignore, + ) + # Now all model params should be the same. + self.validate_net_equivalence(net) + # Since the network params were broadcast from rank_to_broadcast, validate that + # they are the same as new_model on rank_to_broadcast. 
+ if rank == rank_to_broadcast: + expected_states = new_model.state_dict().values() + for t, expected in zip(net_module_states, expected_states): + self.assertEqual(t, expected) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_grad_div_uneven_inputs(self): + # Test gradient division during training with join() API. If + # divide_by_initial_world_size=False, we scale by the effective world + # size when allreducing grads. + dim = 5 + batch = 1 + grad_scale = 50 + rank = self.rank + model = nn.Linear(dim, dim, bias=False) + inp = torch.ones(batch, dim, device=self.rank) * grad_scale + net = torch.nn.parallel.DistributedDataParallel( + model.cuda(rank), device_ids=[self.rank], bucket_cap_mb=1 + ) + n_iters = 3 + if self.rank > 0: + n_iters += 2 + + with net.join(divide_by_initial_world_size=False): + for _ in range(n_iters): + loss = net(inp).sum() + loss.backward() + # The grad is always expected_grad, since we divide by the number + # of currently active processes and inactive processes contribute + # zero gradient. If we kept dividing by static initial world + # size as processes leave, the grad would be smaller. + expected_grad = torch.ones(dim, dim, device=self.rank) * grad_scale + param = next(iter(net.parameters())) + self.assertEqual(expected_grad, param.grad) + # Avoid accumulating grads so that it's the same every iteration + net.zero_grad() + torch.cuda.synchronize(device=self.rank) + + # If divide_by_initial_world_size=True (default), we always scale grads + # by the initial world_size. 
+ with net.join(divide_by_initial_world_size=True): + for i in range(n_iters): + loss = net(inp).sum() + loss.backward() + effective_ws = dist.get_world_size() + if i >= 3: + effective_ws -= 1 + expected_grad = ( + torch.ones(dim, dim, device=self.rank) + * grad_scale + * effective_ws + ) / dist.get_world_size() + param = next(iter(net.parameters())) + self.assertEqual(expected_grad, param.grad) + # Avoid accumulating grad so that it's the same every iteration. + net.zero_grad() + torch.cuda.synchronize(device=self.rank) + + def _test_ddp_profiling(self, profiler_ctx, profiler_ctx2=None): + """Runs DDP based model training and captures profiles. + This test will do two profiler runs. + 1. An inital basic run to check if profiler events are correctly captured. + 2. A second profiling pass after running some iterations of DDP, to check robustness of thread local state. + + args + profiler_ctx : Profiler context manager for pass 1 + profiler_ctx2 : Profiler context manager for pass 2. + This can be left out as None, in which case a deepcopy + of profiler_ctx is used. + Returns: + prof: Instantiated profiler object that can be used for post analysis. 
+ """ + batch = 3 + dim = 10 + num_iters = 6 + torch.cuda.set_device(self.rank) + model = nn.Linear(dim, dim, bias=False) + inp = torch.rand(batch, dim, device=self.rank) + net = torch.nn.parallel.DistributedDataParallel( + model.cuda(self.rank), + device_ids=[self.rank], + ) + if profiler_ctx2 is None: + profiler_ctx2 = copy.deepcopy(profiler_ctx) + + with profiler_ctx as prof: + for i in range(num_iters): + loss = net(inp).sum() + loss.backward() + + all_reduce_event_name = f"{dist.get_backend()}:all_reduce" + events = get_profiling_event(all_reduce_event_name, prof, dedup_gpu_user_annotation=True) + event_count = sum(e.count for e in events) + self.assertEqual(event_count, num_iters) + for event in events: + self.assertTrue(event.is_async) + self.assertEqual(event.name, all_reduce_event_name) + + broadcast_event_name = f"{dist.get_backend()}:broadcast" + broadcast_events = get_profiling_event(broadcast_event_name, prof, dedup_gpu_user_annotation=True) + event_count = sum(e.count for e in broadcast_events) + # Broadcast is called during rebuild_buckets + self.assertGreaterEqual(event_count, 1) + for event in broadcast_events: + self.assertEqual(event.name, broadcast_event_name) + + # Run DDP with profiling for a few iterations, then enable profiling + # for a single pass, and ensure it is recorded. This tests that the + # thread local state is correctly updated. + net = torch.nn.parallel.DistributedDataParallel( + model.cuda(self.rank), + device_ids=[self.rank], + find_unused_parameters=True, + ) + for i in range(3): + loss = net(inp).sum() + loss.backward() + # Now enable the profiler. 
+ with profiler_ctx2 as prof: + loss = net(inp).sum() + loss.backward() + + events = get_profiling_event(all_reduce_event_name, prof, dedup_gpu_user_annotation=True) + self.assertGreaterEqual(len(events), 1) + self.assertGreaterEqual(events[0].count, 1) + self.assertEqual(events[0].name, all_reduce_event_name) + for event in events: + self.assertTrue(event.is_async) + # Ensure searching unused parameters was profiled + events = get_profiling_event("search_unused_parameters", prof) + self.assertEqual(len(events), 1) + + return prof + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle("Currently failing in NVIDIA internal CI") + def test_ddp_profiling_autograd_profiler(self): + autograd_profiler_ctx = torch.autograd.profiler.profile() + return self._test_ddp_profiling(profiler_ctx=autograd_profiler_ctx) + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if(IS_FBCODE, "Kineto in fbcode code causes hang") + @skip_but_pass_in_sandcastle_if( + IS_MACOS or IS_WINDOWS, + "torch.profiler not enabled for mac/windows: https://github.com/pytorch/pytorch/pull/56124", + ) + def test_ddp_profiling_torch_profiler(self): + cpu_act = torch.profiler.ProfilerActivity.CPU + cuda_act = torch.profiler.ProfilerActivity.CUDA + torch_profiler_ctx = torch.profiler.profile(activities=[cpu_act, cuda_act]) + prof = self._test_ddp_profiling(profiler_ctx=torch_profiler_ctx) + + if dist.get_backend() != "nccl": + return + + # Note comment out the "os.remove(trace_file)" in `get_profiler_nccl_meta()` + # to debug any mismatches. 
+ nccl_meta_events = get_profiler_nccl_meta(prof) + self.assertGreater(len(nccl_meta_events), 0) + + nccl_meta = self._sanity_check_profiler_nccl_meta(nccl_meta_events) + + # additionally check the specific collectives in this test case + self.assertEqual(len(nccl_meta["allreduce"]), 2) + self.assertEqual(len(nccl_meta["wait"]), 1) + + # check allreduce message sizes + a0 = nccl_meta["allreduce"][0] + self.assertEqual(a0["Out msg nelems"], 100, msg=f"{a0}") + self.assertEqual(a0["dtype"], "Float", msg=f"{a0}") + a1 = nccl_meta["allreduce"][1] + self.assertEqual(a1["Out msg nelems"], 1, msg=f"{a1}") + self.assertEqual(a1["dtype"], "Int", msg=f"{a1}") + + def _validate_execution_trace_nccl(self, et_file: str) -> None: + """Torch profiler includes nccl metadata in an inserted operator called "record_param_comms" + We test for basic fields in theese nodes in the Execution Trace. + """ + with open(et_file) as f: + et = json.load(f) + pg_cfg_node = [n for n in et["nodes"] if n["name"] == "## process_group:init ##"] + self.assertGreaterEqual(len(pg_cfg_node), 1) + nccl_meta_nodes = [n for n in et["nodes"] if n["name"] == "record_param_comms"] + self.assertEqual(len(nccl_meta_nodes), 3) + per_coll_meta = defaultdict(list) + + # Sanity check NCCL metadata nodes + for n in nccl_meta_nodes: + attrs_list = n.get("attrs", []) + self.assertGreater(len(attrs_list), 0) + attrs = {a["name"]: a["value"] for a in attrs_list} + + collname = attrs.get("collective_name", "") + self.assertNotEqual(collname, "") + self.assertNotEqual(attrs.get("dtype", ""), "") + + per_coll_meta[collname].append(attrs) + if collname in {"wait"}: + continue + + self.assertEqual(attrs["pg_name"], "0") # yes this is a string + self.assertEqual(attrs["pg_desc"], "default_pg") + self.assertEqual(attrs["pg_size"], 2) + + self.assertGreaterEqual(attrs.get("in_msg_nelems", -1), 0) + self.assertGreaterEqual(attrs.get("out_msg_nelems", -1), 0) + self.assertTrue("in_split_size" in attrs.keys()) + 
self.assertTrue("out_split_size" in attrs.keys()) + self.assertEqual(attrs.get("global_rank_start", -1), 0) + self.assertEqual(attrs.get("global_rank_stride", -1), 1) + + # print(per_coll_meta) + self.assertEqual(len(per_coll_meta["allreduce"]), 2) + self.assertEqual(len(per_coll_meta["wait"]), 1) + + # check allreduce message sizes + a0 = per_coll_meta["allreduce"][0] + self.assertEqual(a0["out_msg_nelems"], 100, msg=f"{a0}") + self.assertEqual(a0["dtype"], "Float", msg=f"{a0}") + a1 = per_coll_meta["allreduce"][1] + self.assertEqual(a1["out_msg_nelems"], 1, msg=f"{a1}") + self.assertEqual(a1["dtype"], "Int", msg=f"{a1}") + + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if(IS_FBCODE, "Kineto in fbcode code causes hang") + @skip_but_pass_in_sandcastle_if( + IS_MACOS or IS_WINDOWS, + "torch.profiler not enabled for mac/windows: https://github.com/pytorch/pytorch/pull/56124", + ) + @unittest.skipIf(BACKEND != "nccl", "Tests nccl metadata primarily.") + def test_ddp_profiling_execution_trace(self): + self.assertEqual(dist.get_backend(), "nccl") + # Create a temp file to save execution trace data + fp = tempfile.NamedTemporaryFile("w+t", suffix=".et.json", delete=False) + fp.close() + et_file = fp.name + et = ExecutionTraceObserver().register_callback(et_file) + + # first profiler context need not have ET + torch_profiler_ctx1 = torch.profiler.profile( + activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA], + ) + # collect ET in second profiler pass + torch_profiler_ctx2 = torch.profiler.profile( + activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA], + execution_trace_observer=et + ) + prof = self._test_ddp_profiling( + profiler_ctx=torch_profiler_ctx1, + profiler_ctx2=torch_profiler_ctx2, + ) + + print(f"Execution trace saved at {fp.name}") + self._validate_execution_trace_nccl(et_file) + + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in 
DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_join_model_equivalence(self): + # Verifies equivalence with model training locally and with DDP under + # the join context manager. + batch = 3 + dim = 10 + learning_rate = 0.03 + model = nn.Linear(dim, dim, bias=False) + inp = torch.rand(batch, dim, device=self.rank) + local_model = copy.deepcopy(model) + local_model = local_model.cuda(self.rank) + rank_to_iter_mapping = { + rank: 2 * (rank + 1) for rank in range(dist.get_world_size()) + } + # run local model + local_iters = sum(rank_to_iter_mapping.values()) + local_optim = torch.optim.SGD(local_model.parameters(), lr=learning_rate) + for _ in range(local_iters): + local_optim.zero_grad() + out = local_model(inp) + loss = out.sum() + loss.backward() + local_optim.step() + + # run DDP model with join API + num_iters = rank_to_iter_mapping[self.rank] + net = torch.nn.parallel.DistributedDataParallel( + model.cuda(self.rank), device_ids=[self.rank] + ) + ddp_optim = torch.optim.SGD( + model.parameters(), lr=learning_rate * dist.get_world_size() + ) + with net.join(): + for i in range(num_iters): + ddp_optim.zero_grad() + out = net(inp) + loss = out.sum() + loss.backward() + torch.cuda.synchronize(device=self.rank) + ddp_optim.step() + + # Validate model state dicts are equal + for (_, local_tensor), (_, dist_tensor) in zip( + local_model.state_dict().items(), net.module.state_dict().items() + ): + self.assertEqual(local_tensor, dist_tensor) + + def _run_uneven_inputs_test( + self, + test_case, + iteration_mapping, + find_unused_params, + ): + model = test_case.model + inp = test_case.inp + rank = self.rank + sync_interval = test_case.sync_interval + torch.cuda.set_device(rank) + # Ensure all outstanding GPU work is completed so this test runs independently. + dist.barrier() + # Bucket_cap_mb is intentionally low to test allreduce scheduling when + # there are many buckets. 
+ net = torch.nn.parallel.DistributedDataParallel( + model.cuda(rank), + device_ids=[rank], + bucket_cap_mb=1, + find_unused_parameters=find_unused_params, + ) + # Register hook if specified + if test_case.hook is not None: + net.register_comm_hook(test_case.state, test_case.hook) + print(f"registered hook {test_case.hook}") + + # Determine num iters for this rank via the passed in mapping. + num_iters = iteration_mapping[rank] + # If we throw when earliest rank terminates, we should ensure + # that we iterate for that minimum number of times. + num_iters_tensor = torch.tensor( + [num_iters], device=torch.cuda.current_device() + ) + dist.all_reduce(num_iters_tensor, op=dist.ReduceOp.MIN) + min_num_iters = num_iters_tensor.item() + total_iters = 0 + if test_case.throw_on_early_termination: + if min_num_iters == num_iters: + # Early termination rank(s) + exception_ctx = self.assertRaisesRegex( + RuntimeError, f"Rank {self.rank} exhausted all inputs" + ) + else: + # Non early termination rank + exception_ctx = self.assertRaisesRegex( + RuntimeError, + "Detected at least one rank that exhausted inputs.", + ) + else: + exception_ctx = nullcontext() + with exception_ctx: + with net.join( + throw_on_early_termination=test_case.throw_on_early_termination + ): + for i in range(num_iters): + # Use model.no_sync() to disable grad synchronization every + # sync_interval. + if i % sync_interval != 0: + context = net.no_sync() + else: + context = nullcontext() + with context: + if isinstance(inp, tuple): + loss = net(*inp).sum() + else: + loss = net(inp).sum() + loss.backward() + self._model_step(net) + # Ensure completion of GPU kernels (including allreduce). If the + # join API is not properly implemented, then this should hang + # since the allreduce will hang. + torch.cuda.synchronize(device=rank) + total_iters += 1 + if test_case.throw_on_early_termination: + # Ensure we iterated min_num_iters times. 
+ self.assertEqual(total_iters, min_num_iters) + else: + # Ensure we iterated at least min_num_iters times. + self.assertGreaterEqual(total_iters, min_num_iters) + + # Ensure completion of all GPU kernels. + torch.cuda.synchronize(device=rank) + # When throwing on early rank termination, we do not + # broadcast model state from an authoritative rank. All models + # should already be in sync. + if not test_case.throw_on_early_termination: + self.assertTrue(net._authoritative_rank) + # All ranks should have agreed on the same authoritative_rank! + final_rank_tensor = torch.tensor( + [net._authoritative_rank], device=self.rank + ) + tensor_list = [ + torch.zeros_like(final_rank_tensor) + for _ in range(dist.get_world_size()) + ] + dist.all_gather(tensor_list, final_rank_tensor) + max_rank = dist.get_world_size() - 1 + self.assertSetEqual( + {max_rank}, {tensor.item() for tensor in tensor_list} + ) + # Ensure that all models are the same across ranks after all have joined. + self.validate_net_equivalence(net) + # Ensure that running with DDP uneven inputs was logged. + ddp_logging_data = net._get_ddp_logging_data() + self.assertTrue(ddp_logging_data.get("join_uneven_inputs")) + dist.barrier() + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_uneven_inputs_stop_iteration_sync_bn(self): + # Tests that uneven inputs join handler correctly throws StopIteration + # for models with SyncBN or general collective comm when + # throw_on_early_termination=True. 
+ class ModelWithComm(torch.nn.Module): + def __init__(self) -> None: + super().__init__() + self.lin = nn.Linear(2, 40, bias=False) + + def forward(self, x): + x = self.lin(x) + dist.all_reduce(x) + return x + + torch.cuda.set_device(self.rank) + model_bn = BN_NET + model_bn = nn.SyncBatchNorm.convert_sync_batchnorm( + copy.deepcopy(model_bn) + ).cuda(self.rank) + comm_model = ModelWithComm().cuda(self.rank) + model_input = torch.randn(10, 2).cuda(torch.cuda.current_device()) + + for model in [model_bn, comm_model]: + model = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[self.rank], + ) + min_num_iters = 5 + if self.rank != 0: + # Early termination rank(s) + num_iters = min_num_iters + exception_ctx = self.assertRaisesRegex( + RuntimeError, f"Rank {self.rank} exhausted all inputs" + ) + else: + # Non early termination rank + num_iters = min_num_iters * 2 + exception_ctx = self.assertRaisesRegex( + RuntimeError, + "Detected at least one rank that exhausted inputs.", + ) + n = 0 + with exception_ctx: + with model.join(throw_on_early_termination=True): + for i in range(num_iters): + loss = model(model_input).sum() + loss.backward() + self._model_step(model) + n += 1 + + self.assertEqual(n, min_num_iters) + # Verify model equivalence + self.validate_net_equivalence(model) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_uneven_inputs(self): + dim = 1000 + batch = 1 + # Create a variety of models to run uneven input tests on. 
+ large_model = nn.Sequential( + nn.Conv2d(1, 20, 5), + nn.ReLU(), + nn.Conv2d(20, 32, 5), + nn.ReLU(), + nn.Conv2d(32, 256, 5), + nn.ReLU(), + ) + small_model = nn.Linear(dim, dim, bias=False) + bn_net = BatchNormNet() + + class UnusedParamModule(nn.Module): + def __init__(self, unused_params_rank): + super().__init__() + self.t0 = Task() + self.t1 = Task() + self.unused_params_rank = unused_params_rank + + def task_parameters(self): + return (self.t0.p, self.t1.p) + + def forward(self, x, rank): + return ( + self.t1(self.t0(x)) + if rank != self.unused_params_rank + else self.t1(x) + ) + + unjoined_rank_with_unused_params_model = UnusedParamModule(1) + joined_rank_with_unused_params_model = UnusedParamModule(0) + + rank = self.rank + models_to_test = [ + # Network with batchnorm + DDPUnevenTestInput( + name="batch_norm_net", + model=bn_net, + inp=torch.ones(batch, 2, device=rank), + sync_interval=1, + ), + DDPUnevenTestInput( + name="large_conv_model", + model=large_model, + inp=torch.ones(batch, batch, dim, dim, device=rank), + sync_interval=1, + ), + DDPUnevenTestInput( + name="small_model", + model=small_model, + inp=torch.ones(batch, dim, device=rank), + sync_interval=1, + ), + # Unused parameter test where rank that does not join early has unused params + DDPUnevenTestInput( + name="unjoined_rank_with_unused_params_model", + model=unjoined_rank_with_unused_params_model, + inp=(torch.ones(batch, 2, device=rank), rank), + sync_interval=1, + ), + # Unused parameter test where rank that does join early has unused params + DDPUnevenTestInput( + name="joined_rank_with_unused_params_model", + model=joined_rank_with_unused_params_model, + inp=(torch.ones(batch, 2, device=rank), rank), + sync_interval=1, + ), + ] + + # Test models that have hook installed. 
+ models_with_hook = [ + DDPUnevenTestInput( + name="small_model_allreduce_hook", + model=small_model, + hook=default.allreduce_hook, + state=None, + inp=torch.ones(batch, dim, device=rank), + sync_interval=1, + ), + DDPUnevenTestInput( + name="small_model_power_sgd_hook", + model=small_model, + hook=powerSGD.powerSGD_hook, + state=powerSGD.PowerSGDState( + process_group=None, + matrix_approximation_rank=1, + # Config so that powerSGD runs immediately instead of + # allreduce. + start_powerSGD_iter=1, + warm_start=False, + use_error_feedback=False, + ), + inp=torch.ones(batch, dim, device=rank), + sync_interval=1, + ), + ] + models_to_test.extend(models_with_hook) + + # Add resnet model if we have torchvision installed. + if HAS_TORCHVISION: + resnet_model = torchvision.models.resnet50() + models_to_test.append( + DDPUnevenTestInput( + name="resnet_model", + model=resnet_model, + inp=torch.ones(1, 3, 1000, 1000), + sync_interval=1, + ) + ) + + # Test with no_sync every 2, 3, 4, ... iterations. + models_with_sync = [] + for i, test_input in enumerate(models_to_test): + models_with_sync.append( + DDPUnevenTestInput( + name=test_input.name, + model=test_input.model, + inp=test_input.inp, + sync_interval=i + 2, + ) + ) + + throw_on_early_term_tests = [] + for test_input in models_to_test: + throw_on_early_term_tests.append( + DDPUnevenTestInput( + name=test_input.name, + model=test_input.model, + inp=test_input.inp, + sync_interval=test_input.sync_interval, + throw_on_early_termination=True, + ) + ) + + models_to_test.extend(models_with_sync) + models_to_test.extend(throw_on_early_term_tests) + + # 0 iteration tests for when one process does not train model at all, so + # we must shadow the broadcast calls made when rebuilding buckets. 
+ baseline_num_iters = [0, 5] + iteration_offsets = [2, 3, 10] + num_uneven_ranks = [1] + if dist.get_world_size() > 2: + num_uneven_ranks.append(2) + iteration_mappings = [] + # Generate rank : num_iters mappings for various uneven input scenarios. + # This includes cases where rank 0 joins early and all other ranks join + # later, and scenarios where multiple ranks join early, but at different + # iterations, and later ranks join later. + for num_early_join_ranks in num_uneven_ranks: + for baseline_iter in baseline_num_iters: + for offset in iteration_offsets: + mapping = dict.fromkeys(range(0, num_early_join_ranks), baseline_iter) + # if num_early_join_ranks > 1, ranks > 0 that will join early + # iterate offset//2 more times than rank 0, to test nodes + # depleting inputs at different times. + if num_early_join_ranks > 1: + for rank in mapping.keys(): + if rank > 0: + mapping[rank] += offset // 2 + mapping.update( + dict.fromkeys(range(num_early_join_ranks, dist.get_world_size()), baseline_iter + offset) + ) + iteration_mappings.append(mapping) + + for (test_case, iteration_mapping) in itertools.product( + models_to_test, iteration_mappings + ): + if self.rank == 0: + print( + f"""Running test: {test_case.name} sync interval + {test_case.sync_interval} with iteration mapping + {iteration_mapping}""" + ) + self._run_uneven_inputs_test( + test_case, + iteration_mapping, + find_unused_params=("unused_params_model" in test_case.name), + ) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_uneven_input_join_disable(self): + # tests that if net.join() with enable=False is specified, DDP works as + # expected with even inputs. 
+ torch.manual_seed(self.rank) + net = torch.nn.parallel.DistributedDataParallel( + torch.nn.Linear(1, 1).cuda(self.rank), device_ids=[self.rank] + ) + inp = torch.ones(1) * self.rank + n_iters = 5 + world_size = dist.get_world_size() + with net.join(enable=False): + for _ in range(n_iters): + # Clear grads + grad = net.module.weight.grad + if grad is not None: + grad.requires_grad_(False) + grad.zero_() + out = net(inp) + loss = out.sum() + loss.backward() + # Validate gradients to ensure that we divide by the correct + # world_size when join mode is disabled. + expected_grad = sum(i for i in range(world_size)) / world_size + self.assertEqual(net.module.weight.grad.item(), expected_grad) + + join_config = net._join_config + self.assertFalse(join_config.enable) + self.validate_net_equivalence(net) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_uneven_input_exception(self): + # Tests that exceptions during training are correctly propagated by the + # context manager. + error_str = "Intentional error" + + class ExceptionModule(nn.Module): + def __init__(self) -> None: + super().__init__() + self.param = nn.Parameter(torch.ones(1, requires_grad=True)) + + def forward(self, _): + raise ValueError(error_str) + + exception_module = ExceptionModule() + net = torch.nn.parallel.DistributedDataParallel( + exception_module.cuda(self.rank), device_ids=[self.rank] + ) + inp = torch.ones(1) + with self.assertRaisesRegex(ValueError, error_str): + with net.join(): + out = net(inp) + loss = out.sum() + loss.backward() + + def _test_broadcast_object_list(self, group=None): + gather_objects = COLLECTIVES_OBJECT_TEST_LIST.copy() + + # Only set device for NCCL backend since it must use GPUs. + # Case where rank != GPU device. 
+ next_rank = (self.rank + 1) % int(self.world_size) + backend = os.environ["BACKEND"] + if backend == "nccl": + torch.cuda.set_device(next_rank) + + src_rank = 0 + # If GPU test, add object with GPU tensor + if backend == "nccl": + gather_objects.append(Foo(torch.randn(3, 3, device=0))) + + if IS_FBCODE: + # Create Tensor with > 2^31 Bytes storage requirements + # Only on FBCODE as testing OOMs in OSS + gather_objects.append(Foo(torch.randn(3, 178956971))) + objects = ( + gather_objects + if self.rank == src_rank + else [None for _ in gather_objects] + ) + + # Single object test with device specified. Backend="gloo", device=cpu + if backend != "nccl": + single_obj_list = [objects[0]] + if self.rank != src_rank: + self.assertNotEqual(single_obj_list[0], gather_objects[0]) + dist.broadcast_object_list( + single_obj_list, src=0, group=group, device=torch.device("cpu") + ) + self.assertEqual(single_obj_list[0], gather_objects[0]) + + # Single object test with device specified. Backend="gloo", device=current_device+1 + # The test is gated by the fact GPU count is the same as world size to avoid the case + # when backend is gloo but there is no multiple GPU devices. + if backend != "nccl" and torch.cuda.device_count() == int(self.world_size): + single_obj_list = [objects[0]] + if self.rank != src_rank: + self.assertNotEqual(single_obj_list[0], gather_objects[0]) + dist.broadcast_object_list( + single_obj_list, src=0, group=group, device=torch.device(next_rank) + ) + self.assertEqual(single_obj_list[0], gather_objects[0]) + + # Single object test with device specified. 
Backend="nccl", device=current_device+1 + if backend == "nccl" and torch.cuda.device_count() == int(self.world_size): + single_obj_list = [objects[0]] + if self.rank != src_rank: + self.assertNotEqual(single_obj_list[0], gather_objects[0]) + dist.broadcast_object_list( + single_obj_list, src=0, group=group, device=torch.device(next_rank) + ) + self.assertEqual(single_obj_list[0], gather_objects[0]) + + # Single object test: backward compatibility with device unspecified + single_obj_list = [objects[0]] + if self.rank != src_rank: + self.assertNotEqual(single_obj_list[0], gather_objects[0]) + dist.broadcast_object_list(single_obj_list, src=0, group=group) + self.assertEqual(single_obj_list[0], gather_objects[0]) + + # Multiple input objects test + if self.rank != src_rank: + self.assertNotEqual(objects, gather_objects) + dist.broadcast_object_list(objects, src=0, group=group) + self.assertEqual(objects, gather_objects) + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @require_n_gpus_for_nccl_backend( + int(os.environ["WORLD_SIZE"]), os.environ["BACKEND"] + ) + @with_dist_debug_levels(levels=["DETAIL"]) + @unittest.skip("Test is failing, see https://github.com/pytorch/pytorch/pull/113620") + def test_broadcast_object_list(self): + return self._test_broadcast_object_list() + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @require_n_gpus_for_nccl_backend( + int(os.environ["WORLD_SIZE"]), os.environ["BACKEND"] + ) + @with_dist_debug_levels(levels=["DETAIL"]) + def _test_broadcast_object_list_subgroup(self): + default = _get_default_group() + backend = dist.get_backend(default) + subgroup = dist.new_group(backend=backend) + return self._test_broadcast_object_list(subgroup) + + def _test_ddp_ignore_params_arg(self, static_graph=False): + class TestModel(nn.Module): + def __init__(self, rank): + self.rank = rank + super().__init__() + self.fc1 = nn.Linear(1, 1, bias=False) + # Proxy that will be materialized to another 
architecture later. + # (after wrapping model with DDP) + if self.rank == 0: + self.fc2 = nn.Linear(1, 10, bias=False) + else: + self.fc2 = nn.Linear(10, 10, bias=False) + + def forward(self, x): + x = self.fc1(x) + x = self.fc2(x) + return x + + device_id = self.rank + # Ensure the test works for both find_unused_parameter and broadcast_buffer settings. + for (find_unused, broadcast_buffers) in itertools.product( + [False, True], [False, True] + ): + model = TestModel(self.rank).float().to(device_id) + # Note that the model can have different shape buffers if we pass + # them in to be ignored as well. + model.fc2.register_buffer( + "ignore_buffer", torch.zeros(5 + self.rank, device=self.rank) + ) + proxy_params = list(model.fc2.parameters()) + proxy_buffers = list(model.fc2.buffers()) + model_fc2_name = next( + module_name + for module_name, module in model.named_modules() + if module is model.fc2 + ) + proxy_param_names = [ + f"{model_fc2_name}.{param_name}" + for param_name, _ in model.fc2.named_parameters() + ] + proxy_buffer_names = [ + f"{model_fc2_name}.{buf_name}" + for buf_name, _ in model.fc2.named_buffers() + ] + # Specify that we should ignore proxy_params since it will be + # materialized later. + torch.nn.parallel.DistributedDataParallel._set_params_and_buffers_to_ignore_for_model( + model, proxy_param_names + proxy_buffer_names + ) + ddp = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[device_id], + find_unused_parameters=find_unused, + broadcast_buffers=broadcast_buffers, + static_graph=static_graph, + ) + # Materialize new params. These are not registered in DDP and thus + # don't have autograd hooks installed on them. + ddp.module.fc2 = nn.Linear(1, 1, bias=False).to(device_id) + + # local model with the new materialized parameters. 
+ local_model = copy.deepcopy(ddp.module).cuda(self.rank) + + inp = torch.ones(1, dtype=torch.float).to(device_id) * (self.rank + 1) + for i in range(6): + ddp(inp).sum().backward() + + local_model(inp).sum().backward() + # materialized param grad is not touched by DDP, so its grad should + # be the same as if running locally. + for materialized_param, local_param in zip( + ddp.module.fc2.parameters(), local_model.fc2.parameters() + ): + self.assertEqual(materialized_param.grad, local_param.grad) + + # fc1 parameter grad should still be different, due to allreduce. + for synced_param, local_param in zip( + ddp.module.fc1.parameters(), local_model.fc1.parameters() + ): + self.assertFalse(synced_param.grad == local_param.grad) + + # Proxy module grad should not be touched + for proxy_param in proxy_params: + self.assertTrue(proxy_param.grad is None) + + # Synchronize since we run multiple iterations of this test, to + # isolate failure hangs. + torch.cuda.synchronize(device=self.rank) + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + def test_ddp_ignore_params_arg(self): + self._test_ddp_ignore_params_arg(static_graph=False) + self._test_ddp_ignore_params_arg(static_graph=True) + + @with_dist_debug_levels(levels=["OFF", "INFO", "DETAIL"]) + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + def test_ddp_unused_params_rebuild_buckets_exception(self): + class ToyModel(nn.Module): + def __init__(self) -> None: + super().__init__() + self.net1 = nn.Linear(10, 10, bias=False) + self.net2 = nn.Linear(10, 10, bias=False) + + def forward(self, x): + return self.net1(x) + + ddp = torch.nn.parallel.DistributedDataParallel( + ToyModel().cuda(self.rank), device_ids=[self.rank] + ) + for i in range(2): + inp = torch.rand(1, 10) + if i > 0: + # On 2nd iteration, this will fail during rebuild_buckets, + # but we should report an error regarding unused parameters + # since that is the underlying 
root cause. + try: + ddp(inp).sum().backward() + except RuntimeError as e: + msg = str(e) + verify_ddp_error_logged(ddp, msg) + expected_strs = [ + ddp_prev_reduction_unfinished_str, + ddp_recommend_find_unused_params_str, + ddp_outputs_not_used_in_loss_str, + ] + # In debug mode, should show parameters that weren't reduced. + # Without debug mode, should show suggestion to use debug mode. + if dist.get_debug_level() == dist.DebugLevel.OFF: + expected_strs.append(ddp_suggest_debug_mode_str) + else: + unreduced_params = ", ".join(["net2.weight"]) + expected_strs.append( + f"did not receive grad for rank {self.rank}: {unreduced_params}" + ) + for s in expected_strs: + self.assertTrue(s in msg, f"Expected {s} to be in {msg}") + self.assertFalse(ddp_find_unused_params_enabled_str in msg) + else: + self.assertFalse( + True, "DDP unused parameters error not raised." + ) + else: + ddp(inp).sum().backward() + + dist.barrier() + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + def test_ddp_shared_grad_acc_unused_params(self): + # When find_unused_parameters=True, ensure we mark unused parameters + # even if they share gradient accumulators. + class ToyModel(nn.Module): + def __init__(self) -> None: + super().__init__() + # net1, bias, and net1.bias are all unused params. + self.net1 = nn.Linear(10, 5, bias=False) + self.bias = nn.Parameter(torch.zeros(5)) + # net1.bias and self.bias are names for the same underlying + # parameter, so they share the same grad acc. This caused + # the bug reported in https://github.com/pytorch/pytorch/issues/41324. 
+ self.net1.bias = self.bias + self.net2 = nn.Linear(10, 5) + + def forward(self, x): + return self.net2(x).sum() + + torch.cuda.set_device(self.rank) + model = ToyModel().to(torch.cuda.current_device()) + for static in [True, False]: + ddp_model = torch.nn.parallel.DistributedDataParallel( + copy.deepcopy(model), + device_ids=[self.rank], + find_unused_parameters=True, + static_graph=static, + ) + inp = torch.randn(20, 10, device=self.rank) + for i in range(6): + loss = ddp_model(inp) + # To test https://github.com/pytorch/pytorch/issues/61982 + loss /= 10 + loss.backward() + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + def test_ddp_device(self): + m = nn.Linear(10, 10).to(self.rank) + expected_len = 2 + + class TensorWrapper: + __slots__ = ["t", "moved_to_gpu"] + + def __init__(self, t): + self.t = t + self.moved_to_gpu = False + + # Handlers for specific types of validation we want to do based on + # the input type. + + def tuple_and_list_validator(x): + self.assertTrue(len(x), expected_len) + self.assertEqual(1, len({t.device for t in x})) + self.assertEqual(x[0].device.index, self.rank) + return x[0] + x[1] + + def namedtuple_validator(x): + self.assertEqual(x._fields, EXPECTED_FIELDS) + self.assertEqual(x.a.device.index, x.b.device.index) + self.assertEqual(x.a.device.index, self.rank) + return x.a + x.b + + def custom_type_validator(x): + self.assertTrue(x.moved_to_gpu or (str(x.t.device) == "cpu")) + x.t = x.t.to(self.rank) + x.moved_to_gpu = True + return x.t + + def dict_validator(x): + self.assertTrue(EXPECTED_FIELDS[0] in x.keys()) + self.assertTrue(EXPECTED_FIELDS[1] in x.keys()) + self.assertEqual(1, len({t.device for t in x.values()})) + self.assertEqual(x[EXPECTED_FIELDS[0]].device.index, self.rank) + return x[EXPECTED_FIELDS[0]] + x[EXPECTED_FIELDS[1]] + + validators = { + TensorWrapper: custom_type_validator, + tuple: tuple_and_list_validator, + list: tuple_and_list_validator, + 
TestNamedTupleInput_0: namedtuple_validator, + TestNamedTupleInput_1: namedtuple_validator, + dict: dict_validator, + } + + class ToyModel(torch.nn.Module): + def __init__(self_): # noqa: B902 + super().__init__() + self_.lin = nn.Linear(10, 10, bias=False) + + def forward(self_, x, expected_type): # noqa: B902 + # Similar to scatter, the recursive to in the single-device + # case does not move tensors if they are in a custom type. + self.assertTrue(isinstance(x, expected_type)) + fwd_tensor = validators[expected_type](x) + return self_.lin(fwd_tensor) + + model = torch.nn.parallel.DistributedDataParallel( + ToyModel().to(self.rank), device_ids=[self.rank] + ) + + def train_iter(inp, input_type): + for _ in range(4): + out = model(inp, input_type) + out.sum().backward() + + # CPU tuple input, should be moved to the proper device before call + # to forward. + inp = tuple(torch.randn(10, 10) for _ in range(expected_len)) + train_iter(inp, tuple) + + # List CPU input, should be moved to proper device before call to + # forward. + inp = [torch.randn(10, 10) for _ in range(expected_len)] + train_iter(inp, list) + # Custom type containing tensor. The type is maintained, but the + # device is not propagated (which is what happens with scatter too) + inp = TensorWrapper(torch.randn(10, 10)) + train_iter(inp, TensorWrapper) + # NamedTuple input. The type should be maintained and tensor inputs + # should be moved to the correct device as in scatter. + batch = 5 + dim = 10 + a = torch.rand(batch, dim) + b = torch.rand(batch, dim) + + inp = TestNamedTupleInput_0(a, b) + train_iter(inp, type(inp)) + + inp = TestNamedTupleInput_1(a, b) + train_iter(inp, type(inp)) + + # dictionary input. 
+ inp = { + EXPECTED_FIELDS[0]: a, + EXPECTED_FIELDS[1]: b, + } + train_iter(inp, type(inp)) + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + def test_ddp_namedtuple(self): + batch = 5 + dim = 10 + + a = torch.rand(batch, dim, device=self.rank) + b = torch.rand(batch, dim, device=self.rank) + + class NamedTupleModule(torch.nn.Module): + def __init__(self_): # noqa: B902 + super().__init__() + self_.lin = nn.Linear(10, 1) + + def forward(self_, input, expected_type): # noqa: B902 + # Without NamedTuple support, this would be of type tuple. + self.assertTrue( + isinstance(input, expected_type), + f"Expected type {expected_type} but got {type(input)}", + ) + self.assertEqual(input._fields, EXPECTED_FIELDS) + self.assertEqual(a, input.a) + self.assertEqual(b, input.b) + return self_.lin(torch.mul(input.a, input.b)) + + model = torch.nn.parallel.DistributedDataParallel( + NamedTupleModule().cuda(self.rank), device_ids=[self.rank] + ) + inp = TestNamedTupleInput_0(a, b) + # The following would fail if DDP does not propagate NamedTuples correctly. + model(inp, type(inp)) + + inp = TestNamedTupleInput_1(a, b) + model(inp, type(inp)) + + @require_backend_is_available({"gloo"}) + def test_grads_same_across_ranks_with_no_sync(self): + group, group_id, rank = self._init_global_test() + world_size = dist.get_world_size() + if world_size < 2: + self.skipTest("This test requires at least two ranks.") + + class SimpleConditionalModel(nn.Module): + # if rank is 0, uses nn1 on the first pass and nn2 on the second pass. + # else, uses nn3 on the first pass and nn4 on the second pass. 
+ + def __init__(self, rank): + super().__init__() + + self.rank = rank + self.nn1 = nn.Linear(1, 1) + self.nn2 = nn.Linear(1, 1) + self.nn3 = nn.Linear(1, 1) + self.nn4 = nn.Linear(1, 1) + self.state = 0 + + def forward(self, input): + if self.state == 0: + self.state = 1 + if self.rank == 0: + return self.nn1(input) + else: + return self.nn3(input) + else: + self.state = 0 + if self.rank == 0: + return self.nn2(input) + else: + return self.nn4(input) + + model = torch.nn.parallel.DistributedDataParallel( + SimpleConditionalModel(rank), find_unused_parameters=True + ) + mse_loss = nn.MSELoss() + grad_accumulation = 2 + + for microbatch_idx in range(grad_accumulation): + if microbatch_idx < grad_accumulation - 1: + context = model.no_sync + else: + context = nullcontext + + with context(): + input = torch.rand((1, )) + output = model.forward(input) + target = torch.rand((1, )) + + loss = mse_loss(output, target) + loss.backward() + + self.assertTrue( + not any(p.grad is None for p in model.parameters()), + "Gradients can't be None for any model parameter." + ) + grads = torch.cat([p.grad.view(-1) for p in model.parameters()]) + + # Gather all gradients to rank 0. + if rank == 0: + gathered_grads = [torch.zeros_like(grads) for _ in range(world_size)] + else: + gathered_grads = [] + + dist.gather(grads, gather_list=gathered_grads, dst=0) + if rank == 0: + for g in gathered_grads[1:]: + self.assertTrue( + torch.allclose(gathered_grads[0], g), + "Gradients are not the same for all ranks." + ) + + @with_dist_debug_levels(levels=["OFF", "INFO", "DETAIL"]) + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + def test_ddp_control_flow_same_across_ranks(self): + # Control flow that is the same across ranks. 
+ batch = 20 + dim = 10 + + world_size = dist.get_world_size() + torch.cuda.set_device(self.rank) + model = torch.nn.parallel.DistributedDataParallel( + ControlFlowToyModel().cuda(self.rank), + device_ids=[self.rank], + find_unused_parameters=True, + ) + random_input = torch.randn(batch, dim, device=self.rank) + ones_input = torch.ones(batch, dim, device=self.rank) + for i in range(6): + if i % 2 == 0: + out = model(random_input) + else: + out = model(ones_input) + loss = out.sum() + loss.backward() + # On even iterations, 2nd param goes unused, on odd iterations, + # it is used. + local_used_map = model.reducer._get_local_used_map() + if i % 2 == 0: + expected = torch.tensor( + [world_size, 0], device=self.rank, dtype=torch.int32 + ) + else: + expected = torch.tensor( + [world_size, world_size], device=self.rank, dtype=torch.int32 + ) + + # Validate parameter usage. + variable_usage_tensor = local_used_map + self.assertEqual(variable_usage_tensor, expected) + + # Validate appropriate error message when DDP is used with + # find_unused_parameters=False. + model = torch.nn.parallel.DistributedDataParallel( + ControlFlowToyModel().cuda(self.rank), + device_ids=[self.rank], + find_unused_parameters=False, + ) + for i in range(2): + if i == 0: + loss = model(random_input).sum() + loss.backward() + else: + try: + loss = model(random_input).sum() + loss.backward() + except RuntimeError as e: + msg = str(e) + verify_ddp_error_logged(model, msg) + # 2nd linear layer is unused + unused_param_index = 1 + expected_strs = [ + ddp_prev_reduction_unfinished_str, + ddp_recommend_find_unused_params_str, + ddp_outputs_not_used_in_loss_str, + f"Parameter indices which did not receive grad for rank {self.rank}: {unused_param_index}", + ] + # In debug mode, should show parameters that weren't reduced. + # Without debug mode, should show suggestion to use debug mode. 
+ if dist.get_debug_level() == dist.DebugLevel.OFF: + expected_strs.append(ddp_suggest_debug_mode_str) + else: + unreduced_params = ", ".join(["lin2.weight"]) + expected_strs.append( + f"did not receive grad for rank {self.rank}: {unreduced_params}" + ) + for s in expected_strs: + self.assertTrue(s in msg, f"Expected {s} to be in {msg}") + self.assertFalse(ddp_find_unused_params_enabled_str in msg) + else: + self.assertFalse(True, "DDP error not raised") + + dist.barrier() + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + def test_invalid_static_graph(self): + world_size = dist.get_world_size() + torch.cuda.set_device(self.rank) + model = torch.nn.parallel.DistributedDataParallel( + ControlFlowToyModel().cuda(self.rank), + device_ids=[self.rank], + static_graph=True, + ) + random_input = torch.randn(20, 10, device=self.rank) + ones_input = torch.ones(20, 10, device=self.rank) + # unused parameter in the first iteration got used + # in second iteration. + expected_err = "Your training graph has changed in this iteration" + with self.assertRaisesRegex(RuntimeError, expected_err): + for i in range(2): + if i % 2 == 0: + out = model(random_input) + else: + out = model(ones_input) + loss = out.sum() + loss.backward() + + verify_ddp_error_logged(model, expected_err) + + # used parameter in the first iteration got unused + # in second iteration. + with self.assertRaisesRegex( + RuntimeError, + "Expected to have finished reduction in the prior iteration " + "before starting a new one. This error indicates that your " + "training graph has changed in this iteration, " + "e.g., one parameter is used in first iteration, " + "but then got unused in the second iteration. 
" + "this is not compatible with static_graph set to True.\n" + "Parameter indices which did not receive grad for", + ): + for i in range(2): + if i % 2 != 0: + out = model(random_input) + else: + out = model(ones_input) + loss = out.sum() + loss.backward() + + verify_ddp_error_logged(model, "Expected to have finished reduction") + + @with_dist_debug_levels(levels=["OFF", "INFO", "DETAIL"]) + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + def test_ddp_control_flow_different_across_ranks(self): + # Control flow that is different across ranks. + batch = 20 + dim = 10 + + class ToyModel(nn.Module): + def __init__(self, rank): + super().__init__() + self.lin1 = nn.Linear(10, 10, bias=False) + self.lin2 = nn.Linear(10, 10, bias=False) + self.rank = rank + + def forward(self, x): + # Control-flow that is rank and input dependent for the + # model. + use_second_layer = ( + torch.equal(x, torch.ones(batch, dim, device=x.device)) + and self.rank == 1 + ) + + if use_second_layer: + return self.lin2(F.relu(self.lin1(x))) + else: + return F.relu(self.lin1(x)) + + world_size = dist.get_world_size() + torch.cuda.set_device(self.rank) + model = torch.nn.parallel.DistributedDataParallel( + ToyModel(self.rank).cuda(self.rank), + device_ids=[self.rank], + find_unused_parameters=True, + ) + random_input = torch.randn(batch, dim, device=self.rank) + ones_input = torch.ones(batch, dim, device=self.rank) + for i in range(6): + if i % 2 == 0: + out = model(random_input) + else: + out = model(ones_input) + loss = out.sum() + loss.backward() + # On even iterations, 2nd param goes unused, on odd iterations, + # it is used only on rank 1. 
+ local_used_map = model.reducer._get_local_used_map() + + if i % 2 == 0: + expected = torch.tensor( + [world_size, 0], device=self.rank, dtype=torch.int32 + ) + else: + expected = torch.tensor( + [world_size, 1], device=self.rank, dtype=torch.int32 + ) + + variable_usage_tensor = local_used_map + # Validate parameter usage. On odd iterations, 2nd param is only + # used on rank 1. + self.assertEqual(variable_usage_tensor, expected) + + # Validate appropriate error message when DDP is used with + # find_unused_parameters=False. + model = torch.nn.parallel.DistributedDataParallel( + ToyModel(self.rank).cuda(self.rank), + device_ids=[self.rank], + find_unused_parameters=False, + ) + for i in range(2): + if i == 0: + loss = model(random_input).sum() + loss.backward() + else: + try: + loss = model(random_input).sum() + loss.backward() + except RuntimeError as e: + msg = str(e) + verify_ddp_error_logged(model, msg) + unused_param_index = 1 + expected_strs = [ + ddp_prev_reduction_unfinished_str, + ddp_recommend_find_unused_params_str, + ddp_outputs_not_used_in_loss_str, + f"Parameter indices which did not receive grad for rank {self.rank}: {unused_param_index}", + ] + # In debug mode, should show parameters that weren't reduced. + # Without debug mode, should show suggestion to use debug mode. 
+ if dist.get_debug_level() == dist.DebugLevel.OFF: + expected_strs.append(ddp_suggest_debug_mode_str) + else: + unreduced_params = ", ".join(["lin2.weight"]) + expected_strs.append( + f"did not receive grad for rank {self.rank}: {unreduced_params}" + ) + for s in expected_strs: + self.assertTrue(s in msg, f"Expected {s} to be in {msg}") + self.assertFalse(ddp_find_unused_params_enabled_str in msg) + else: + self.assertFalse(True, "DDP error not raised") + + dist.barrier() + + @require_backend_is_available({"gloo"}) + def test_scatter_object_list(self): + src_rank = 0 + scatter_list = ( + COLLECTIVES_OBJECT_TEST_LIST + if self.rank == src_rank + else [None for _ in COLLECTIVES_OBJECT_TEST_LIST] + ) + world_size = dist.get_world_size() + scatter_list = scatter_list[:world_size] + i = 0 + while len(scatter_list) < world_size: + scatter_list.append(scatter_list[i]) + i += 1 + + output_obj_list = [None] + dist.scatter_object_list(output_obj_list, scatter_list, src=src_rank) + self.assertEqual( + output_obj_list[0], + COLLECTIVES_OBJECT_TEST_LIST[ + self.rank % len(COLLECTIVES_OBJECT_TEST_LIST) + ], + ) + # Ensure errors are raised upon incorrect arguments. 
+ with self.assertRaisesRegex( + ValueError, + "Expected argument scatter_object_output_list to be a list of size at least 1.", + ): + dist.scatter_object_list([], scatter_list, src=src_rank) + + def _generate_sparse_tensors_for_bucket_assignment_test(self): + tensors = [ + torch.empty([50], dtype=torch.float), + torch.empty([25], dtype=torch.double), + torch.empty([50], dtype=torch.float), + torch.empty([25], dtype=torch.double), + torch.empty([50], dtype=torch.float), + torch.empty([25], dtype=torch.double), + ] + + tensors_sparse = [t.to_sparse() for t in tensors] + return tensors_sparse + + def _test_compute_bucket_assignment_by_size(self, use_logger): + group_gloo = dist.new_group( + timeout=timedelta(seconds=60), backend=dist.Backend.GLOO + ) + # Set TORCH_NCCL_BLOCKING_WAIT and use a new NCCL group to improve test + # determinism. + os.environ["TORCH_NCCL_BLOCKING_WAIT"] = "1" + group_to_use = dist.new_group( + backend=dist.get_backend(), timeout=timedelta(seconds=5) + ) + torch.cuda.set_device(self.rank) + + # Create a valid model. The constructor initializes the logger that we use later. + # We never actually use the rest of the model - we only need its logger. + net = EmbeddingNetDifferentParams(0) + net = torch.nn.parallel.DistributedDataParallel( + net.to(self.rank), + device_ids=[self.rank], + process_group=group_to_use, + ) + + # if we don't pass a logger then we can only check that an exception was thrown. + expected_err = "No support for sparse tensors." 
+ with self.assertRaisesRegex(RuntimeError, expected_err): + tensors_sparse = ( + self._generate_sparse_tensors_for_bucket_assignment_test() + ) + if use_logger: + result = dist._compute_bucket_assignment_by_size( + tensors_sparse, [400], logger=net.logger + ) + else: + result = dist._compute_bucket_assignment_by_size( + tensors_sparse, [400] + ) + if use_logger: + verify_ddp_error_logged(net, expected_err) + + # Perform gloo-based barrier to ensure one rank doesn't exit test + # early which causes failure with Barrier.sync. + dist.barrier(group_gloo) + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + def test_compute_bucket_assignment_by_size_sparse_error_without_logger(self): + self._test_compute_bucket_assignment_by_size(use_logger=False) + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + def test_compute_bucket_assignment_by_size_sparse_error_with_logger(self): + self._test_compute_bucket_assignment_by_size(use_logger=True) + + def _determine_expected_error_verify_model_across_rank( + self, group_to_use, diff_num_params=False + ): + # When running with NCCL backend, we don't expect an error on rank 0, + # rather, it will be taken down by TORCH_NCCL_ASYNC_ERROR_HANDLING. When + # running with Gloo or with debug mode wrapper, we expect the error + # to be caught inline. + # All ranks report same error when there is a # of parameter + # mismatch since we use allgather in the impl. 
+ if diff_num_params: + expected_err = "DDP expects same model across all ranks" + ctx = self.assertRaisesRegex(RuntimeError, expected_err) + return ctx, expected_err + + is_detail_dbg_mode = dist.get_debug_level() == dist.DebugLevel.DETAIL + if self.rank == 0: + if ( + dist.get_backend(group_to_use) == dist.Backend.NCCL + and not is_detail_dbg_mode + ): + expected_err = "caught collective operation timeout" + ctx = self.assertRaisesRegex(RuntimeError, expected_err) + else: + expected_err = None + ctx = self.assertRaises(RuntimeError) + else: + expected_err = "appears not to match" + ctx = self.assertRaisesRegex(RuntimeError, expected_err) + return ctx, expected_err + + def _test_verify_model_across_rank(self, use_logger): + group_gloo = dist.new_group( + timeout=timedelta(seconds=60), backend=dist.Backend.GLOO + ) + # Set TORCH_NCCL_BLOCKING_WAIT and use a new NCCL group to improve test + # determinism. + os.environ["TORCH_NCCL_BLOCKING_WAIT"] = "1" + group_to_use = dist.new_group( + backend=dist.get_backend(), timeout=timedelta(seconds=5) + ) + torch.cuda.set_device(self.rank) + ctx, expected_err = self._determine_expected_error_verify_model_across_rank( + group_to_use + ) + + # Create a valid model. The constructor initializes the logger that we use later. + net = EmbeddingNetDifferentParams(0) + net = torch.nn.parallel.DistributedDataParallel( + net.to(self.rank), + device_ids=[self.rank], + process_group=group_to_use, + ) + + # Modify the model so that the number of parameters are different for each rank. + # This will cause a RuntimeError to be thrown below in _verify_param_shape_across_processes, + # so we can check if the correct error is thrown and is logged. + # We can't do this in the constructor above otherwise the logger will + # not be properly initialized. 
+ net.module.lin = nn.Linear(100 if self.rank == 0 else 10, 1) + + # if we pass a logger we can verify that it was logged + with ctx: + if use_logger: + _verify_param_shape_across_processes( + net.process_group, list(net.parameters()), net.logger + ) + else: + _verify_param_shape_across_processes( + net.process_group, list(net.parameters()) + ) + # Should only be run by rank 0, and blocking_wait catches and + # reports exception. + dist.barrier(group_to_use) + + # We don't check when self.rank != 0 because the logger doesn't log + # the error "Caught collective operation" as that is not thrown in the reducer. + if use_logger and self.rank != 0: + verify_ddp_error_logged(net, expected_err) + + # Perform gloo-based barrier to ensure one rank doesn't exit test + # early which causes failure with Barrier.sync. + dist.barrier(group_gloo) + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_but_pass_in_sandcastle_if( + BACKEND == "ucc" and IS_SANDCASTLE, "Skipped internally" + ) + @skip_if_lt_x_gpu(2) + def test_verify_model_across_rank_with_logger(self): + self._test_verify_model_across_rank(use_logger=True) + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_but_pass_in_sandcastle_if( + BACKEND == "ucc" and IS_SANDCASTLE, "Skipped internally" + ) + @skip_if_lt_x_gpu(2) + def test_verify_model_across_rank_without_logger(self): + self._test_verify_model_across_rank(use_logger=False) + + def _run_test_ddp_model_with_diff_params(self, ctx, net, ddp_group, group_gloo): + with ctx: + net = torch.nn.parallel.DistributedDataParallel( + net.to(self.rank), device_ids=[self.rank], process_group=ddp_group + ) + # Should only be run by rank 0, and blocking_wait catches and + # reports exception. + dist.barrier(ddp_group) + + # can't use verify_ddp_error_logged here because net was never properly constructed + + # Perform gloo-based barrier to ensure one rank doesn't exit test + # early which causes failure with Barrier.sync. 
+ dist.barrier(group_gloo) + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_but_pass_in_sandcastle_if( + BACKEND == "ucc" and IS_SANDCASTLE, "Skipped internally" + ) + @skip_if_lt_x_gpu(2) + def test_ddp_model_diff_shape_across_ranks(self): + group_gloo = dist.new_group( + timeout=timedelta(seconds=60), backend=dist.Backend.GLOO + ) + # Set TORCH_NCCL_BLOCKING_WAIT and use a new NCCL group to improve test + # determinism. + os.environ["TORCH_NCCL_BLOCKING_WAIT"] = "1" + group_to_use = dist.new_group( + backend=dist.get_backend(), timeout=timedelta(seconds=10) + ) + torch.cuda.set_device(self.rank) + ctx, expected_err = self._determine_expected_error_verify_model_across_rank( + group_to_use + ) + # Creates network with different sized embedding table on different + # ranks. This should throw an error during DDP init. + net = EmbeddingNetDifferentParams(self.rank) + self._run_test_ddp_model_with_diff_params( + ctx, net, group_to_use, group_gloo + ) + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_but_pass_in_sandcastle_if( + BACKEND == "ucc" and IS_SANDCASTLE, "Skipped internally" + ) + @skip_if_lt_x_gpu(2) + def test_ddp_model_diff_num_params_across_ranks(self): + group_gloo = dist.new_group( + timeout=timedelta(seconds=60), backend=dist.Backend.GLOO + ) + # Set TORCH_NCCL_BLOCKING_WAIT and use a new NCCL group to improve test + # determinism. + os.environ["TORCH_NCCL_BLOCKING_WAIT"] = "1" + group_to_use = dist.new_group( + backend=dist.get_backend(), timeout=timedelta(seconds=10) + ) + torch.cuda.set_device(self.rank) + ctx, expected_err = self._determine_expected_error_verify_model_across_rank( + group_to_use, diff_num_params=True + ) + + # Creates network with diff # of param across ranks, reducer should + # recognize this and throw appropriate error. 
+ net = EmbeddingNetDifferentParams( + self.rank, diff_num_params=(self.rank == 1) + ) + + self._run_test_ddp_model_with_diff_params( + ctx, + net, + group_to_use, + group_gloo, + ) + + def _test_output_unused_in_loss(self, module_cls, gradient_as_bucket_view): + model = module_cls() + local_net = copy.deepcopy(model) + net = torch.nn.parallel.DistributedDataParallel( + copy.deepcopy(model).cuda(self.rank), + device_ids=[self.rank], + find_unused_parameters=True, + ) + + # Tests that certain parameters not getting gradient since the + # output is unused in loss computation is supported. Specifically, + # checks that the grads remain unchanged and are the same as local + # training. + inp = torch.randn(10, 10) + + # Ensure that if a param is not used in loss computation, its + # gradient is untouched, i.e. if it is None before it is None after, + # not zero. + if module_cls == DictOutputModule: + a, b = local_net(inp)["predictions"] + a_dist, b_dist = net(inp)["predictions"] + else: + a, b = local_net(inp) + a_dist, b_dist = net(inp) + + loss_dist = b_dist.sum() + loss_dist.backward() + + # Ensure that gradient corresponding to parameter "a" was not + # touched, i.e. it is None and matches the local grad. + if module_cls == DictOutputModule: + self.assertTrue(net.module.module.a.weight.grad is None) + self.assertEqual( + net.module.module.a.weight.grad, local_net.module.a.weight.grad + ) + else: + self.assertTrue(net.module.a.weight.grad is None) + self.assertEqual(net.module.a.weight.grad, local_net.a.weight.grad) + + saved_a_local_grad = None + saved_a_dist_grad = None + net.zero_grad() + local_net.zero_grad() + for i in range(6): + if module_cls == DictOutputModule: + a, b = local_net(inp)["predictions"] + a_dist, b_dist = net(inp)["predictions"] + else: + a, b = local_net(inp) + a_dist, b_dist = net(inp) + if i < 2: + # Use both params in loss computation. 
Later, "a" will go + # unused and we check to ensure DDP supports this and + # gradients remain the same as local training. + t = a @ b + t_dist = a_dist @ b_dist + loss = t.sum() + loss_dist = t_dist.sum() + else: + # Model output "a" unused in loss. + loss = b.sum() + loss_dist = b_dist.sum() + loss.backward() + loss_dist.backward() + if i == 1: + # Save grads to compare with them in next iterations. + if module_cls == DictOutputModule: + saved_a_local_grad = local_net.module.a.weight.grad + saved_a_dist_grad = net.module.module.a.weight.grad + else: + saved_a_local_grad = local_net.a.weight.grad + saved_a_dist_grad = net.module.a.weight.grad + self.assertEqual(saved_a_local_grad, saved_a_dist_grad) + elif i >= 2: + # parameter "a" of both models should be the same and not change + if module_cls == DictOutputModule: + self.assertEqual( + net.module.module.a.weight.grad, saved_a_dist_grad + ) + self.assertEqual( + local_net.module.a.weight.grad, saved_a_local_grad + ) + else: + self.assertEqual(net.module.a.weight.grad, saved_a_dist_grad) + self.assertEqual(local_net.a.weight.grad, saved_a_local_grad) + + # Verify grads are the same + for (local_param, dist_param) in zip( + local_net.parameters(), net.parameters() + ): + local_grad = local_param.grad + dist_grad = dist_param.grad + self.assertEqual(local_grad, dist_grad) + + dist.barrier() + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_lt_x_gpu(2) + def test_output_unused_in_loss_tuple_module(self): + module_cls = UnusedParamTwoLinLayerNet + for grad_as_bucket_view in [True, False]: + self._test_output_unused_in_loss(module_cls, grad_as_bucket_view) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_lt_x_gpu(2) + def test_output_unused_in_loss_dict_module(self): + 
module_cls = DictOutputModule + for grad_as_bucket_view in [True, False]: + self._test_output_unused_in_loss(module_cls, grad_as_bucket_view) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_lt_x_gpu(2) + def test_undefined_grad_parity_unused_parameters(self): + # TODO: enable this for general training use cases: + # https://github.com/pytorch/pytorch/issues/58511. + x = torch.ones(1, 2).to(self.rank) + net = Net().to(self.rank) + local_net = copy.deepcopy(net) + net = torch.nn.parallel.DistributedDataParallel( + net, + device_ids=[self.rank], + find_unused_parameters=True, + ) + out = net(x).sum() + local_out = local_net(x).sum() + # Simulates undefined gradients. + torch._C._functions.UndefinedGrad()(out).backward() + torch._C._functions.UndefinedGrad()(local_out).backward() + for (dist_param_name, dist_param), (local_param_name, local_param) in zip( + net.named_parameters(), local_net.named_parameters() + ): + dist_grad = dist_param.grad + local_grad = local_param.grad + self.assertEqual( + dist_grad, + local_grad, + f"""DDP param {dist_param_name} with grad {dist_grad} + does not match local param {local_param_name} with grad + {local_grad}""", + ) + + def _test_different_graph_across_ranks( + self, find_unused_parameters=False, static_graph=False + ): + class ToyModel(nn.Module): + def __init__(self, rank): + super().__init__() + self.lin1 = nn.Linear(10, 10, bias=False) + self.lin2 = nn.Linear(10, 10, bias=False) + self.rank = rank + + def forward(self, x): + if self.rank == 0: + return self.lin2(F.relu(self.lin1(x))) + else: + return F.relu(self.lin1(x)) + + torch.manual_seed(31415) + world_size = dist.get_world_size() + torch.cuda.set_device(self.rank) + model = ToyModel(self.rank).cuda(self.rank) + ddp_model = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[self.rank], + 
find_unused_parameters=find_unused_parameters, + gradient_as_bucket_view=True, + static_graph=static_graph, + ) + random_input = torch.randn(20, 10, device=self.rank) + for i in range(10): + out = ddp_model(random_input) + loss = out.sum() + loss.backward() + return ddp_model + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + def test_different_graph_across_ranks(self): + base_model = self._test_different_graph_across_ranks( + find_unused_parameters=True + ) + self.assertFalse( + base_model._get_ddp_logging_data().get("has_rebuilt_buckets", 0) + ) + static_model = self._test_different_graph_across_ranks(static_graph=True) + self.assertTrue( + static_model._get_ddp_logging_data().get("has_rebuilt_buckets", 0) + ) + for i, j in zip(base_model.parameters(), static_model.parameters()): + self.assertEqual(i, j) + + @require_backend_is_available({"gloo"}) + @skip_but_pass_in_sandcastle_if( + IS_MACOS or IS_WINDOWS, + "MacOS uses uv transport which does not have as robust error handling as tcp transport", + ) + def test_monitored_barrier_gloo(self): + tensors = [torch.ones(10) * self.rank] + # Kick off some allreduce work on all ranks + for _ in range(10): + dist.all_reduce(torch.cat(tensors)) + # Run monitored barrier and ensure it passes + timeout = timedelta(seconds=2) + dist.monitored_barrier(timeout=timeout) + # Check monitored_barrier success with wait_all_ranks=True + for _ in range(10): + dist.all_reduce(torch.cat(tensors)) + dist.monitored_barrier(timeout=timeout, wait_all_ranks=True) + # All ranks besides 1 call into barrier, rank 0 should report failure + # while others report gloo error. + failed_rank = 1 + src_rank = 0 + if self.rank == src_rank: + with self.assertRaisesRegex( + RuntimeError, f"Rank {failed_rank} failed to pass monitoredBarrier" + ): + dist.monitored_barrier(timeout=timeout) + elif self.rank != failed_rank: + # Other ranks should not pass barrier since rank 0 failed. 
+ err_regex = ( + f"Rank {self.rank} successfully reached monitoredBarrier," + f" but received errors while waiting for send/recv from rank" + f" {src_rank}" + ) + with self.assertRaisesRegex(RuntimeError, err_regex): + dist.monitored_barrier(timeout=timeout) + + # We need a barrier since otherwise failed_rank exits too early + # and cause a timeout. + self._barrier(timeout=30) + + @require_backend_is_available({"gloo"}) + def test_monitored_barrier_gloo_subgroup(self): + # Tests that monitored_barrier works as expected on non-default + # process groups. + failed_rank = 1 + timeout = 0.1 + subgroup = dist.new_group(ranks=[0, 1]) + + if self.rank == failed_rank: + return + + if self.rank == 0: + with self.assertRaisesRegex( + RuntimeError, f"Rank {failed_rank} failed to pass monitoredBarrier" + ): + dist.monitored_barrier(subgroup, timeout) + else: + # Other ranks call into monitored_barrier, but this should be a + # noop because they are not part of the subgroup. Verify that + # there are no errors here. + dist.monitored_barrier(subgroup, timeout) + + def _test_monitored_barrier_allreduce_hang(self, wait_all_ranks): + # tests expected behavior when nonzero rank hangs. + nccl_pg = dist.new_group( + ranks=list(range(int(self.world_size))), + # provide sufficient timeout so communicators + # can be initialized in ctor. + timeout=timedelta(seconds=15), + backend=dist.Backend.NCCL, + ) + gloo_pg = dist.new_group( + ranks=list(range(int(self.world_size))), + backend=dist.Backend.GLOO, + ) + tensors = [torch.ones(10, device=self.rank) * self.rank] + # Let all ranks call allreduce first to set up communicators etc. + # Directly simulating error here will run into store issue described + # in https://github.com/pytorch/pytorch/issues/54524. + nccl_pg.allreduce(tensors).wait(timedelta(seconds=5)) + # All ranks besides 0 call into allreduce. 
This is to simulate a + # desync across the world, where some ranks call into + # monitored_barrier() and others are stuck in collective comm. In + # practice, we don't need TORCH_NCCL_BLOCKING_WAIT, but we use it in this + # test to ensure it exits cleanly. + if self.rank != 0: + # Can get different errors here depending on whether gloo-based + # wrapper PG is enabled or not, since with wrapper pg, it will + # fail in a collective synchronization check and not actually + # call into the nccl pg. + if dist.get_debug_level() == dist.DebugLevel.DETAIL: + err_regex = "Timed out waiting" + else: + err_regex = "caught collective operation timeout" + with self.assertRaisesRegex(RuntimeError, err_regex): + nccl_pg.allreduce(tensors).wait(timedelta(seconds=0.1)) + else: + # Rank 0 should report first (in order) timed out rank or all ranks + # depending on wait_all_ranks flag passed into monitored_barrier. + if wait_all_ranks: + rank_str = ", ".join( + [str(i) for i in range(1, int(self.world_size))] + ) + err_regex = f"Ranks {rank_str} failed to pass monitoredBarrier" + else: + expected_first_fail_rank = 1 + err_regex = f"Rank {expected_first_fail_rank} failed to pass monitoredBarrier" + monitored_barrier_timeout_seconds = timedelta(seconds=0.1) + with self.assertRaisesRegex(RuntimeError, err_regex): + gloo_pg.monitored_barrier( + monitored_barrier_timeout_seconds, wait_all_ranks=wait_all_ranks + ) + + self._barrier(timeout=30) + + @with_nccl_blocking_wait + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"])) + def test_monitored_barrier_allreduce_hang(self): + # tests expected behavior when nonzero rank hangs and we want to + # report first timed out rank. 
+ self._test_monitored_barrier_allreduce_hang(wait_all_ranks=False) + + @with_nccl_blocking_wait + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"])) + def test_monitored_barrier_allreduce_hang_wait_all_ranks(self): + # Need to disable TORCH_NCCL_DUMP_ON_TIMEOUT otherwise this test times out + os.environ["TORCH_NCCL_DUMP_ON_TIMEOUT"] = "0" + # tests expected behavior when nonzero rank hangs and we want to + # report all timed out ranks. + self._test_monitored_barrier_allreduce_hang(wait_all_ranks=True) + + @require_backend_is_available({"gloo"}) + def test_monitored_barrier_gloo_rank_0_timeout(self): + # tests error when rank 0 exhausts its given timeout. + process_group = dist.new_group(ranks=list(range(int(self.world_size)))) + timeout = timedelta(seconds=0) + if self.rank == 0: + with self.assertRaisesRegex( + RuntimeError, f"Rank {self.rank} timed out in monitoredBarrier" + ): + process_group.monitored_barrier(timeout) + + @require_backend_is_available({"gloo"}) + @skip_if_small_worldsize + @skip_but_pass_in_sandcastle_if( + IS_MACOS or IS_WINDOWS, + "MacOS uses uv transport which does not have as robust error handling as tcp transport", + ) + def test_monitored_barrier_failure_order(self): + # Ensure that the first (in sorted order) rank is reported when + # multiple ranks fail to pass the monitored_barrier. 
+ # TODO(#54879): Provide ability to wait and report all failed ranks + expected_first_failed_rank = 2 + timeout = timedelta(seconds=2) + src_rank = 0 + if self.rank == src_rank: + with self.assertRaisesRegex( + RuntimeError, f"Rank {expected_first_failed_rank}" + ): + dist.monitored_barrier(timeout=timeout) + elif self.rank == 1: + err_regex = ( + f"Rank {self.rank} successfully reached monitoredBarrier," + f" but received errors while waiting for send/recv from rank" + f" {src_rank}" + ) + with self.assertRaisesRegex(RuntimeError, err_regex): + dist.monitored_barrier(timeout=timeout) + + @require_backend_is_available({"gloo"}) + @skip_if_small_worldsize + def test_monitored_barrier_wait_all_ranks(self): + # Tests simple case where > 1 rank does not call into monitored + # barrier and verifies all ranks are reported by rank 0. + if self.rank == 0: + timeout = timedelta(seconds=0.1) + rank_str = ", ".join([str(i) for i in range(1, int(self.world_size))]) + err_regex = f"Ranks {rank_str} failed to pass monitoredBarrier" + with self.assertRaisesRegex(RuntimeError, err_regex): + dist.monitored_barrier(timeout=timeout, wait_all_ranks=True) + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @with_dist_debug_levels(levels=["INFO"]) + @skip_if_lt_x_gpu(2) + def test_ddp_build_debug_param_to_name_mapping(self): + model = TwoLinLayerNet() + net = torch.nn.parallel.DistributedDataParallel( + model.cuda(self.rank), + device_ids=[self.rank], + ) + expected_mapping = {0: "a.weight", 1: "b.weight"} + net_params, _ = net._build_params_for_reducer() + param_to_name_mapping = net._build_debug_param_to_name_mapping(net_params) + self.assertDictEqual(expected_mapping, param_to_name_mapping) + + # Test when DDP is used with ignored parameters. 
+ model = TwoLinLayerNet() + # Parameters to ignore are in the format {module_name}.{param_name} + params_to_ignore = ["a.weight"] + torch.nn.parallel.DistributedDataParallel._set_params_and_buffers_to_ignore_for_model( + model, params_to_ignore + ) + net = torch.nn.parallel.DistributedDataParallel( + model.cuda(self.rank), + device_ids=[self.rank], + ) + expected_mapping = {0: "b.weight"} + net_params, _ = net._build_params_for_reducer() + param_to_name_mapping = net._build_debug_param_to_name_mapping(net_params) + self.assertDictEqual(expected_mapping, param_to_name_mapping) + + # Test errors are raised when DDP and module parameters mismatch. + # This generally indicates a bug with DDP and is not expected to + # happen in user applications. + model = TwoLinLayerNet() + net = torch.nn.parallel.DistributedDataParallel( + model.cuda(self.rank), + device_ids=[self.rank], + ) + net_params, _ = net._build_params_for_reducer() + if self.rank == 0: + print(type(net_params[0])) + + net_params.extend( + [ + torch.nn.Parameter(torch.ones(1)), + torch.nn.Parameter(torch.ones(1)), + ] + ) + + with self.assertRaisesRegex(ValueError, "Expected param to name mapping"): + net._build_debug_param_to_name_mapping(net_params) + + net_params = net_params[:-3] + with self.assertRaisesRegex(ValueError, "Param with name"): + net._build_debug_param_to_name_mapping(net_params) + + net_params.extend( + [ + torch.nn.Parameter(torch.ones(1)), + torch.nn.Parameter(torch.ones(1)), + ] + ) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @with_dist_debug_levels(levels=["INFO"]) + @skip_if_lt_x_gpu(2) + def test_ddp_build_debug_param_to_name_mapping_requires_grad(self): + class Net(nn.Module): + def __init__(self) -> None: + super().__init__() + self.lin = nn.Linear(10, 10) + # Is not tracked by DDP and should not show up in param to + # name mapping. 
+ self.lin.bias.requires_grad_(False) + + def forward(self, x): + return self.lin(x) + + model = Net() + net = torch.nn.parallel.DistributedDataParallel( + model.cuda(self.rank), device_ids=[self.rank] + ) + expected_mapping = { + 0: "lin.weight", + } + net_params, _ = net._build_params_for_reducer() + param_to_name_mapping = net._build_debug_param_to_name_mapping(net_params) + self.assertEqual(param_to_name_mapping, expected_mapping) + + def _test_ddp_multiple_nested_unused_params_error(self, ignore_sparse): + debug_mode_off = dist.get_debug_level() == dist.DebugLevel.OFF + + class SubModule(nn.Module): + def __init__(self) -> None: + super().__init__() + self.embedding_net = EmbeddingNetDifferentParams(0) + self.lin = TwoLinLayerNet() + self.bn = BatchNormNet() + self.lin_layer = nn.Linear(4, 10, bias=False) + + def forward(self, x): + x = self.bn(x) + x = self.lin_layer(x) + x = self.lin.a(x) # self.lin.b param unused + # EmbeddingNetDifferentParams entirely unused: self.embedding_net.embedding and + # self.embedding_net.lin unused. + return x + + class MyModel(nn.Module): + def __init__(self) -> None: + super().__init__() + self.sub_module = SubModule() + + def forward(self, x): + return self.sub_module(x) + + model = MyModel() + sparse_embedding_fqns = [] + if ignore_sparse: + for module_name, module in model.named_modules(): + if module == model.sub_module.embedding_net.embedding: + for parameter_name, param in module.named_parameters( + recurse=False + ): + fqn = f"{module_name}.{parameter_name}" + sparse_embedding_fqns.append(fqn) + + torch.nn.parallel.DistributedDataParallel._set_params_and_buffers_to_ignore_for_model( + model, sparse_embedding_fqns + ) + unused_modules = [ + model.sub_module.embedding_net.lin, + model.sub_module.lin.b, + ] + else: + unused_modules = list(model.sub_module.embedding_net.modules()) + [ + model.sub_module.lin.b, + ] + + expected_unused_param_fqns = [] + used_param_fqns = [] # Validate that these don't mistakenly show up. 
+ fqn_to_param_index = {} + index = 0 + for module_name, module in model.named_modules(): + for parameter_name, param in module.named_parameters(recurse=False): + fqn = f"{module_name}.{parameter_name}" + fqn_to_param_index[fqn] = index + if fqn not in sparse_embedding_fqns: + index += 1 + if module in unused_modules: + expected_unused_param_fqns.append(fqn) + else: + if ( + not ignore_sparse + or module != model.sub_module.embedding_net.embedding + ): + used_param_fqns.append(fqn) + + net = torch.nn.parallel.DistributedDataParallel( + model.cuda(self.rank), + device_ids=[self.rank], + ) + batch, dim = 10, 2 + inp = torch.ones(batch, dim) + for i in range(2): + if i == 0: + out = net(inp) + loss = out.sum() + loss.backward() + else: + try: + out = net(inp) + loss = out.sum() + loss.backward() + except RuntimeError as e: + e = str(e) + + unused_param_substr = e[e.find("did not receive grad") :] + # Validate that each unused param fully qualified name + # shows up in error logs. We do this instead of + # constructing a joined string since order of parameters + # can be different in Reducer. In addition, validate + # param indices show up as well. + for unused_param_fqn in expected_unused_param_fqns: + self.assertTrue( + unused_param_fqn in unused_param_substr + or debug_mode_off + ) + self.assertTrue( + str(fqn_to_param_index[unused_param_fqn]) + in unused_param_substr, + f"Did not find index {fqn_to_param_index[unused_param_fqn]} for {unused_param_fqn}", + ) + + # Validate that used param fqns don't show up in error + # logs. 
+ for used_param_fqn in used_param_fqns: + self.assertFalse(used_param_fqn in unused_param_substr) + # Validate that ignored param fqns don't show up as unused + # (since DDP does not track them) + for sparse_param_fqn in sparse_embedding_fqns: + self.assertFalse(sparse_param_fqn in unused_param_substr) + else: + self.assertTrue(False, "Expected error was not raised!") + + @with_dist_debug_levels(levels=["OFF", "INFO", "DETAIL"]) + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + def test_ddp_multiple_nested_unused_params_error(self): + self._test_ddp_multiple_nested_unused_params_error(ignore_sparse=False) + + @with_dist_debug_levels(levels=["OFF", "INFO", "DETAIL"]) + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + def test_ddp_multiple_nested_unused_params_err_ignore_params(self): + # Tests unused parameter reporting when DDP is configured to ignore + # certain parameters. + self._test_ddp_multiple_nested_unused_params_error(ignore_sparse=True) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_lt_x_gpu(2) + def test_ddp_inference(self): + # tests that DDP module can be run on a single node with no_grad + # or eval setting and there is no hang. 
+ rank = self.rank + torch.cuda.set_device(rank) + model = Net().cuda() + local_model = copy.deepcopy(model) + model = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[rank], + ) + syncbn_model = nn.SyncBatchNorm( + 2, momentum=0.99, track_running_stats=False + ).cuda() + local_syncbn_model = copy.deepcopy(syncbn_model) + syncbn_model = torch.nn.parallel.DistributedDataParallel( + syncbn_model, device_ids=[rank] + ) + inp = torch.randn(10, 2, device=rank) + inp_syncbn = torch.randn(10, 2, 4, 4, device=rank) + tests = [ + (model, local_model, inp), + (syncbn_model, local_syncbn_model, inp_syncbn), + ] + for test in tests: + test_model, test_local_model, test_inp = test + if self.rank == 0: + test_model.eval() + test_local_model.eval() + for _ in range(6): + self.assertEqual( + test_model(test_inp), test_local_model(test_inp) + ) + + # Barrier since only rank 0 runs inference. Test should be + # much faster than 30s, but this is to avoid flakiness. + self._barrier(timeout=30) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @skip_if_lt_x_gpu(2) + @unittest.skip("Test is failing, see https://github.com/pytorch/pytorch/pull/113620") + def test_ddp_sync_bn_training_vs_eval(self): + rank = self.rank + torch.cuda.set_device(rank) + # Need to set track_running_stats=False, when track_running_stats=True, + # bn_training is False and sync could not occur in eval model. + model = nn.SyncBatchNorm(2, momentum=0.99, track_running_stats=False).cuda( + rank + ) + model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[rank]) + # Test sync occurs in training mode. + with torch.autograd.profiler.profile() as prof: + for i in range(6): + inp = torch.randn(10, 2, 4, 4).cuda(rank) + out = model(inp) + loss = out.sum() + loss.backward() + + # SyncBN allgathers stats across all ranks, so verify call to + # all_gather in profiler. 
+ if BACKEND == "nccl": + all_gather_calls = get_profiling_event("_all_gather_base", prof) + else: + all_gather_calls = get_profiling_event("all_gather", prof) + self.assertNotEqual([], all_gather_calls) + + # Only do inference on one rank. If SyncBN did collective stats sync, + # this would hang/error. + model_inference = model.module + if self.rank == 0: + model_inference.eval() + with torch.autograd.profiler.profile() as prof: + for i in range(6): + inp = torch.randn(10, 2, 4, 4).cuda(rank) + out = model_inference(inp) + loss = out.sum() + loss.backward() + + # Ensure sync does not occur in eval() mode. + if BACKEND == "nccl": + all_gather_calls = get_profiling_event("_all_gather_base", prof) + else: + all_gather_calls = get_profiling_event("all_gather", prof) + self.assertEqual([], all_gather_calls) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_python_error_logged(self): + # Most python exceptions in DDP are raised during init before + # reducer is constructed, so we don't have a logger in those cases. + # However, the below is one example where a python error is thrown + # after reducer is constructed. + model = TwoLinLayerNet().cuda(self.rank) + model = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[self.rank], + ) + expected_err = "must be callable" + with self.assertRaisesRegex(TypeError, expected_err): + model.register_comm_hook({}, {}) + + verify_ddp_error_logged(model, expected_err) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_static_graph_nested_types(self): + # Tests for static graph training when outputs are not just tensors + # but can be (nested) tuple, list, dict, etc. 
+ rank = self.rank + torch.cuda.set_device(rank) + + class NestedOutputModule(torch.nn.Module): + def __init__(self) -> None: + super().__init__() + self.lin = nn.Linear(100, 1, bias=False) + + def forward(self, inp, output_type): + if output_type == "tuple": + return ( + self.lin(inp), + ( + self.lin(inp), + self.lin(inp), + ), + ) + elif output_type == "list": + return [ + self.lin(inp), + [ + self.lin(inp), + self.lin(inp), + ], + ] + elif output_type == "dict": + return { + "a": self.lin(inp), + "b": { + "c": self.lin(inp), + }, + } + + def get_loss(model_output): + loss = 0.0 + if isinstance(model_output, torch.Tensor): + return model_output.sum() + elif isinstance(model_output, dict): + for value in model_output.values(): + loss += get_loss(value) + elif isinstance(model_output, (tuple, list)): + for x in model_output: + loss += get_loss(x) + else: + raise ValueError(f"Unknown model output type {type(model_output)}") + return loss + + model = NestedOutputModule().cuda(rank) + model_static_graph = copy.deepcopy(model) + model = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[rank], + ) + model_static_graph = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[rank], + static_graph=True, + ) + inp = torch.randn(10, 100) + type_mapping = { + "list": list, + "tuple": tuple, + "dict": dict, + } + for output_type in type_mapping.keys(): + for i in range(6): + out = model(inp, output_type=output_type) + loss = get_loss(out) + loss.backward() + self._model_step(model) + out_static = model_static_graph(inp, output_type=output_type) + self.assertTrue(isinstance(out_static, type_mapping[output_type])) + loss_static = get_loss(out_static) + loss_static.backward() + self._model_step(model_static_graph) + for (p, p_static) in zip( + model.parameters(), model_static_graph.parameters() + ): + self.assertEqual(p, p_static) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The 
{BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_returns_tensor_with_no_grad(self): + # Tests case where module returns tensor that does not require grad. + torch.cuda.set_device(self.rank) + + class MyModel(nn.Module): + def __init__(self) -> None: + super().__init__() + self.fc1 = nn.Linear(10, 10, bias=False) + self.fc2 = nn.Linear(10, 10, bias=False) + + def forward(self, x): + x = self.fc2(F.relu(self.fc1(x))) + y = x.clone() + x = x.detach() + assert not x.requires_grad + return (x, y) + + model = MyModel().to(self.rank) + inp = torch.randn(1, 10, device=self.rank) + for (find_unused, static_graph) in itertools.product( + [True, False], [True, False] + ): + ddp = DistributedDataParallel( + model, + device_ids=[self.rank], + output_device=self.rank, + find_unused_parameters=find_unused, + static_graph=static_graph, + ) + for i in range(6): + out = ddp(inp) + self.assertFalse(out[0].requires_grad) + o = (out[0] + out[1]).sum() + o.backward() + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_detect_ddp_is_actually_static(self): + class ToyModel(nn.Module): + def __init__(self) -> None: + super().__init__() + self.net1 = nn.Linear(10, 10, bias=False) + self.net2 = nn.Linear(10, 10) + + def forward(self, x, find_unused, dynamic): + if find_unused: + if dynamic: + return self.net2(self.net1(x)) + else: + return self.net2(x) + else: + return self.net2(self.net1(x)) + + # Set of unused parameters don't change across iterations + torch.cuda.set_device(self.rank) + model = ToyModel().cuda() + for find_unused in [True, False]: + ddp = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[self.rank], + find_unused_parameters=find_unused, + ) + inp = torch.randn(1, 10, device="cuda") + for _ in range(6): + out = ddp(inp, find_unused=find_unused, dynamic=False) + loss = out.sum() + 
loss.backward() + self.assertTrue(ddp.reducer._ddp_graph_static()) + + # Set of unused parameters dynamically change + ddp = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[self.rank], + find_unused_parameters=True, + ) + inp = torch.randn(1, 10, device="cuda") + for i in range(6): + out = ddp(inp, find_unused=True, dynamic=i % 2 == 0) + loss = out.sum() + loss.backward() + self.assertFalse(ddp.reducer._ddp_graph_static()) + + def _test_ddp_new_tensor_in_fwd(self, static_graph): + # Test from https://github.com/pytorch/pytorch/issues/60733 + class MyModel(nn.Module): + def __init__(self) -> None: + super().__init__() + self.fc1 = nn.Linear(10, 10, bias=False) + self.fc2 = nn.Linear(10, 10, bias=False) + self.device = self.fc1.weight.device + + def __init_opt(self): + opt = torch.randn(1, 10, device=self.device) + return opt + + def forward(self, x, opt_1, opt_2, opt_nested): + x = F.relu(self.fc1(x)) + x = self.fc2(x) + if opt_1 is None: + opt_1 = self.__init_opt() + if opt_2 is None: + opt_2 = self.__init_opt() + if opt_nested is None or not torch.is_tensor(opt_nested): + opt_nested = self.__init_opt() + # Test multiple tensors as well as newly created tensors + # within a struct. 
+ return x, opt_1, opt_2, {"tensor": opt_nested} + + model = MyModel().to(self.rank) + for find_unused in [True, False]: + ddp = DistributedDataParallel( + model, + device_ids=[self.rank], + output_device=self.rank, + broadcast_buffers=False, + find_unused_parameters=find_unused, + static_graph=static_graph, + ) + + opt = [None for _ in range(3)] + for i in range(2): + ddp.zero_grad() + x = torch.randn(1, 10, device=self.rank) + out, opt[0], opt[1], opt[2] = ddp( + x, opt_1=opt[0], opt_2=opt[1], opt_nested=opt[2] + ) + for i in range(len(opt)): + if torch.is_tensor(opt[i]): + self.assertEqual(opt[i].grad_fn, None) + else: + self.assertEqual(opt[i]["tensor"].grad_fn, None) + out.mean().backward() + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_new_tensor_in_fwd(self): + return self._test_ddp_new_tensor_in_fwd(static_graph=False) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_new_tensor_in_fwd_static_graph(self): + return self._test_ddp_new_tensor_in_fwd(static_graph=True) + + def _test_ddp_buffer_hook_allreduce(self, return_futures): + rank = self.rank + torch.cuda.set_device(rank) + torch.manual_seed(rank) + torch.cuda.manual_seed(rank) + + def buffer_comm_hook(ddp, named_buffers): + buffers = [buffer for (_, buffer) in named_buffers.items()] + futs = [ + dist.all_reduce( + buffer, group=ddp.process_group, async_op=True + ).get_future() + for buffer in buffers + ] + if return_futures: + return futs + else: + torch.futures.collect_all(futs).wait() + + hook_pre_fwd = ( + torch.nn.parallel.distributed._BufferCommHookLocation.PRE_FORWARD + ) + hook_post_fwd = ( + torch.nn.parallel.distributed._BufferCommHookLocation.POST_FORWARD + ) + for hook_run_location in [ + 
hook_pre_fwd, + hook_post_fwd, + ]: + model = NetWithBuffers().cuda(rank) + model_ddp = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[self.rank], + ) + model_ddp._register_buffer_comm_hook( + model_ddp, buffer_comm_hook, hook_run_location + ) + model_ddp_no_hook = torch.nn.parallel.DistributedDataParallel( + copy.deepcopy(model), + device_ids=[self.rank], + broadcast_buffers=False, + ) + inp = torch.randn(2, 10, device=rank) + for i in range(2): + loss_hook = model_ddp(inp).sum() + # Since buffer reduction is done pre-forward, simulate it for + # no hook case here. + # Simulate allreduce appropriately depending on hook location. + if hook_run_location == hook_pre_fwd: + model_no_hook_buffers = list(model_ddp_no_hook.module.buffers()) + for tensor in model_no_hook_buffers: + dist.all_reduce(tensor) + + loss_no_hook = model_ddp_no_hook(inp).sum() + if hook_run_location == hook_post_fwd: + model_no_hook_buffers = list(model_ddp_no_hook.module.buffers()) + for tensor in model_no_hook_buffers: + dist.all_reduce(tensor) + torch.cuda.synchronize() + + # if return_futures, they are only awaited on by DDP + # at the end of the backwards pass for maximum overlap. + if not return_futures: + self._verify_buffers_equal(model_ddp, model_ddp_no_hook) + loss_hook.backward() + loss_no_hook.backward() + # Note that when custom hooks return futures, this + # comparison is not expected to work when hook run location + # is pre-forward pass. This is because the hook does async + # communication and forward pass modifies the buffer without + # appropriate synchronization. Therefore, if returning + # futures from custom buffer hooks, it is advised to set + # hook run location to post forward. 
+ if return_futures and hook_run_location == hook_post_fwd: + self._verify_buffers_equal(model_ddp, model_ddp_no_hook) + dist.barrier() + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_buffer_hook_allreduce_return_future(self): + self._test_ddp_buffer_hook_allreduce(return_futures=True) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_buffer_hook_allreduce(self): + self._test_ddp_buffer_hook_allreduce(return_futures=False) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_broadcast_buffer_via_hook(self): + # test that _distributed_broadcast_coalesced via registered hook is + # equivalent to DDP's default broadcast coalesced. + rank = self.rank + torch.cuda.set_device(rank) + torch.manual_seed(rank) + torch.cuda.manual_seed(rank) + + def buffer_comm_hook(ddp, named_buffers): + # named_buffers is a Dict[str, Tensor] representing a mapping + # from buffer name to buffer. 
+ buffers = [buffer for (_, buffer) in named_buffers.items()] + ddp._default_broadcast_coalesced(buffers) + + model = NetWithBuffers().cuda(rank) + model_ddp = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[self.rank], + ) + model_ddp._register_buffer_comm_hook(model_ddp, buffer_comm_hook) + model_ddp_no_hook = torch.nn.parallel.DistributedDataParallel( + copy.deepcopy(model), + device_ids=[self.rank], + ) + inp = torch.randn(2, 10, device=rank) + for i in range(2): + loss_hook = model_ddp(inp).sum() + loss_no_hook = model_ddp_no_hook(inp).sum() + self._verify_buffers_equal(model_ddp, model_ddp_no_hook) + loss_hook.backward() + loss_no_hook.backward() + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_remove_autograd_hooks(self): + + class SimulateError(torch.autograd.Function): + @staticmethod + def forward(ctx, input): + return input + + @staticmethod + def backward(ctx, grad_output): + raise RuntimeError + + class MyModel(nn.Module): + def __init__(self, device): + super().__init__() + self.error = True + self.fc1 = nn.Linear(10, 10).cuda(device) + + def forward(self, inp): + if self.error: + return self.fc1(SimulateError.apply(inp)) + else: + return self.fc1(inp) + + + # Run with error to trigger backward pass that marks fc1 as being marked + # ready. If we don't remove autograd hooks before running below it would + # fail on the old autograd hook. + model = MyModel(self.rank) + input = torch.rand(10, 10, requires_grad=True).cuda(self.rank) + model_ddp1 = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[self.rank], + ) + + with self.assertRaises(RuntimeError): + model_ddp1(input).sum().backward() + + # Remove autograd hooks on old instance. + model_ddp1._remove_autograd_hooks() + + # Try another DDP instance without error now. 
+ model.error = False + model_ddp2 = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[self.rank], + ) + model_ddp2(input).sum().backward() + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + @unittest.skip("Test is failing, tracking issue at https://github.com/pytorch/pytorch/issues/102751") + def test_ddp_has_finalized(self): + + @dataclass + class MyClass: + obj: torch.Tensor + + class MyModel(nn.Module): + def __init__(self, rank): + super().__init__() + self.rank = rank + self.fc1 = nn.Linear(1024, 1024).cuda(rank) + self.fc2 = nn.Linear(1024, 2 * 1024).cuda(rank) + + def forward(self, inp): + if self.rank == 0: + return self.fc1(inp), MyClass(self.fc2(inp)) + else: + return self.fc1(inp), self.fc2(inp) + + model = MyModel(self.rank) + input = torch.rand(10, 1024, requires_grad=True).cuda(self.rank) + ddp = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[self.rank], + find_unused_parameters=True, + bucket_cap_mb=(1024 * 4 / 1024 / 1024), # One bucket per parameter. + ) + + if self.rank == 0: + out1, _ = ddp(input) + out1.sum().backward() + else: + out1, out2 = ddp(input) + (out1.sum() + out2.sum()).backward() + + if self.rank == 0: + with self.assertRaisesRegex(RuntimeError, "Expected to have finished reduction in the prior iteration"): + ddp._check_reducer_finalized() + + with self.assertRaisesRegex(RuntimeError, "Expected to have finished reduction in the prior iteration"): + ddp(input) + else: + ddp._check_reducer_finalized() + ddp(input) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl", + "TORCH_NCCL_USE_COMM_NONBLOCKING only applies to NCCL" + ) + def test_nccl_init_abort(self): + """ + Tests that we can abort a NCCL communicator during initialization and + recover appropriately. 
+ """ + # Reinitialize global process group with TORCH_NCCL_USE_COMM_NONBLOCKING=1 + os.environ["TORCH_NCCL_USE_COMM_NONBLOCKING"] = "1" + dist.destroy_process_group() + timeout = timedelta(seconds=1) + dist.init_process_group( + init_method=INIT_METHOD, + backend=BACKEND, + world_size=int(os.environ["WORLD_SIZE"]), + rank=self.rank, + timeout=timeout, + ) + + # Abort pg in background thread. + running = True + + def abort(device): + pg = _get_default_group() + while running: + pg._get_backend(torch.device(device))._shutdown() + time.sleep(1) + + if self.rank != 1: + import threading + t = threading.Thread(target=abort, args=(self.rank,)) + t.start() + with self.assertRaises(RuntimeError): + # First collective triggers initialization via ncclCommInitRank. + torch.distributed.barrier() + running = False + t.join() + + def _run_ddp_update_process_group(self, new_pg): + def get_num_torch_recompiles(): + guard_failures = torch._dynamo.utils.guard_failures + num_recompiles = [len(guard_failures[code]) for code in guard_failures] + return 0 if len(num_recompiles) == 0 else max(num_recompiles) + + class SimulateError(torch.autograd.Function): + @staticmethod + def forward(ctx, input): + return input + + @staticmethod + def backward(ctx, grad_output): + raise RuntimeError + + class MyModel(torch.nn.Module): + def __init__(self, device): + super().__init__() + # 4MB for multiple buckets. 
+ self.fc1 = torch.nn.Linear(1024, 1024).cuda(device) + self.fc2 = torch.nn.Linear(1024, 1024).cuda(device) + self.fc3 = torch.nn.Linear(1024, 1024).cuda(device) + + def forward(self, inp, error): + if error: + return self.fc3(self.fc2(self.fc1(SimulateError.apply(inp)))) + else: + return self.fc3(self.fc2(self.fc1(inp))) + + + input = torch.rand(10, 1024, requires_grad=True).cuda(self.rank) + ddp = torch.nn.parallel.DistributedDataParallel( + MyModel(self.rank), + device_ids=[self.rank], + find_unused_parameters=True, + bucket_cap_mb=1, + ) + model = torch.compile(ddp) + + def run_iteration(): + # Run regular iteration. + out = model(input, error=False) + out.sum().backward() + torch.cuda.synchronize() + + # Run with error. + with self.assertRaises(RuntimeError): + out = model(input, error=True) + out.sum().backward() + torch.cuda.synchronize() + + run_iteration() + assert 0 == get_num_torch_recompiles() + + if new_pg: + # Now reduce world_size and run iteration. + group_size_2 = dist.new_group(ranks=[0, 1]) + ddp._update_process_group(group_size_2) + if self.rank in [0, 1]: + run_iteration() + + # Increase the world size and run iteration. + group_size_3 = dist.new_group(ranks=[1, 2, 3]) + ddp._update_process_group(group_size_3) + if self.rank in [1, 2, 3]: + run_iteration() + + # Back to default size. + ddp._update_process_group(_get_default_group()) + run_iteration() + else: + # Create default pg of smaller size. + dist.destroy_process_group() + + if self.rank in [1, 2, 3]: + dist.init_process_group( + init_method=self.init_method, + backend=BACKEND, + world_size=3, + rank=self.rank - 1, + timeout=timedelta(seconds=default_pg_timeout), + ) + ddp._update_process_group(_get_default_group()) + run_iteration() + dist.destroy_process_group() + + # Need a barrier here to ensure ranks 1, 2 and 3 are done. + self._barrier(wait_for=4) + + # Need to init pg again for "_barrier" to succeed. 
+ dist.init_process_group( + init_method=self.init_method, + backend=BACKEND, + world_size=4, + rank=self.rank, + timeout=timedelta(seconds=default_pg_timeout), + ) + + # Validate no more recompiles. + assert 0 == get_num_torch_recompiles() + + @skip_if_lt_x_gpu(4) + @require_world_size(4) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_update_process_group_new_group(self): + self._run_ddp_update_process_group(new_pg=True) + + @skip_if_lt_x_gpu(4) + @require_world_size(4) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_update_process_group_default_group(self): + self._run_ddp_update_process_group(new_pg=False) + + @skip_if_lt_x_gpu(4) + @require_world_size(4) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_update_process_group_grad_undefined(self): + class SimulateError(torch.autograd.Function): + @staticmethod + def forward(ctx, input): + return input + + @staticmethod + def backward(ctx, grad_output): + raise RuntimeError + + class MyModel(torch.nn.Module): + def __init__(self, device): + super().__init__() + self.fc1 = torch.nn.Linear(10, 10).cuda(device) + self.fc2 = torch.nn.Linear(10, 10).cuda(device) + self.fc3 = torch.nn.Linear(10, 10).cuda(device) + + def forward(self, inp, error): + if error: + return self.fc3(self.fc2(self.fc1(SimulateError.apply(inp)))) + else: + return self.fc2(self.fc1(inp)) + + + input = torch.rand(10, 10, requires_grad=True).cuda(self.rank) + ddp = torch.nn.parallel.DistributedDataParallel( + MyModel(self.rank), + device_ids=[self.rank], + find_unused_parameters=True, + bucket_cap_mb=1, + ) + + try: + ddp(input, True).sum().backward() + except 
RuntimeError: + ddp._update_process_group(_get_default_group()) + + # Reset grads. + for param in ddp.parameters(): + param.grad = None + + # Run ddp again. + ddp(input, False).sum().backward() + + @skip_if_lt_x_gpu(4) + @require_world_size(4) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_update_process_group_no_find_unused(self): + ddp = torch.nn.parallel.DistributedDataParallel( + torch.nn.Linear(10, 10).cuda(self.rank), + device_ids=[self.rank], + find_unused_parameters=False, + ) + ddp._update_process_group(_get_default_group()) + + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_broadcast_buffer(self): + rank = self.rank + torch.cuda.set_device(rank) + torch.manual_seed(rank) + torch.cuda.manual_seed(rank) + + class NetWithBuffers(nn.Module): + def __init__(self) -> None: + super().__init__() + self.a = nn.Linear(10, 10, bias=False) + self.b = nn.Linear(10, 1, bias=False) + self.register_buffer("buffer", torch.randn(1, 2)) + + def forward(self, x): + return self.b(self.a(x)) + + model = NetWithBuffers().cuda(rank) + model_ddp = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[self.rank], + ) + inp = torch.randn(2, 10, device=rank) + for i in range(2): + if rank == 0: + model_ddp.module.buffer = model_ddp.module.buffer + 1 + loss = model_ddp(inp).sum() + loss.backward() + # Ensure all buffers are synchronized. 
+ bufs = [ + torch.empty_like(model_ddp.module.buffer) + for _ in range(dist.get_world_size()) + ] + dist.all_gather(bufs, model_ddp.module.buffer) + rank_0_buf = bufs[0] + for buf in bufs[1:]: + self.assertEqual(rank_0_buf, buf) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl" and BACKEND != "gloo", + "Only Nccl & Gloo backend support DistributedDataParallel", + ) + def test_static_graph_multi_forward(self): + class Net(nn.Module): + def __init__(self) -> None: + super().__init__() + self.lin = nn.Linear(10, 10) + self.relu = nn.ReLU() + + def forward(self, x): + return self.relu(self.lin(x)) + + torch.cuda.set_device(self.rank) + torch.manual_seed(42 << 1337 % (self.rank + 1)) + model = Net().cuda(self.rank) + local_model = copy.deepcopy(model) + model = torch.nn.parallel.DistributedDataParallel( + model, device_ids=[self.rank], static_graph=True + ) + inp = torch.ones(2, 10, device="cuda") + for _ in range(3): + model.zero_grad() + local_model.zero_grad() + a = model(inp) + b = model(inp) + loss = a.sum() + b.sum() + loss.backward() + # Grads should be equal to a local model that ran through inp twice and averaged grads + if self.rank == 0: + inp_clone = inp.clone() + for _ in range(2): + a = local_model(inp_clone) + b = local_model(inp_clone) + loss = a.sum() + b.sum() + loss.backward() + + ws = dist.get_world_size() + for p in local_model.parameters(): + p.grad.data = p.grad / dist.get_world_size() + + for p_ddp, p_local in zip( + model.parameters(), + local_model.parameters() + ): + self.assertTrue( + torch.allclose( + p_ddp.grad, p_local.grad + ), + f"{p_ddp.grad} vs {p_local.grad}" + ) + + dist.barrier() + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND != "nccl" and BACKEND != "gloo", + "Only Nccl & Gloo backend support DistributedDataParallel", + ) + def test_sync_bn_logged(self): + model = BN_NET + rank = self.rank + # single gpu training setup + model_gpu = model.cuda(rank) + no_sync_bn = 
torch.nn.parallel.DistributedDataParallel( + copy.deepcopy(model_gpu), + device_ids=[self.rank], + ) + ddp_logging_data = no_sync_bn._get_ddp_logging_data() + sync_bn_logged = ddp_logging_data.get("has_sync_bn", True) + self.assertFalse(sync_bn_logged) + model_DDP = nn.SyncBatchNorm.convert_sync_batchnorm(model_gpu) + model_DDP = torch.nn.parallel.DistributedDataParallel( + model_DDP, + device_ids=[self.rank], + ) + ddp_logging_data = model_DDP._get_ddp_logging_data() + sync_bn_logged = ddp_logging_data.get("has_sync_bn", False) + self.assertTrue(sync_bn_logged) + + @skip_if_lt_x_gpu(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_stateless_api_with_ddp(self): + class MockModule(torch.nn.Module): + def __init__(self) -> None: + super().__init__() + self.l1 = torch.nn.Linear(1, 1) + buffer = torch.ones(1) + self.register_buffer("buffer", buffer) + + def forward(self, x): + return self.l1(x) + self.buffer + + device = self.rank + module = MockModule().to(device) + module = torch.nn.parallel.DistributedDataParallel( + module, device_ids=[device] + ) + x = torch.rand((1, 1)).to(device) + weight = torch.tensor([[1.0]], device=device, requires_grad=True) + bias = torch.tensor([0.0], device=device, requires_grad=True) + buffer = torch.tensor([0.0], device=device) + parameters = { + "module.l1.weight": weight, + "module.l1.bias": bias, + "module.buffer": buffer, + } + prev_weight = module.module.l1.weight.clone() + prev_buffer = module.module.buffer.clone() + + res = torch.func.functional_call(module, parameters, x) + self.assertEqual(x, res) + # check that the weight remain unmodified + cur_weight = module.module.l1.weight + cur_buffer = module.module.buffer + self.assertEqual(cur_weight, prev_weight) + self.assertEqual(cur_buffer, prev_buffer) + # run a backward pass and check the gradients + res.backward() + self.assertIsNotNone(weight.grad) + 
self.assertIsNotNone(bias.grad) + # Gradient was not calculated for the module stated and buffers + self.assertIsNone(buffer.grad) + self.assertIsNone(module.module.l1.weight.grad) + self.assertIsNone(module.module.l1.bias.grad) + self.assertIsNone(module.module.buffer.grad) + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + def test_ddp_forward_backward_hook(self): + class DummyTestModel(nn.Module): + def __init__(self) -> None: + super().__init__() + torch.manual_seed(0) + self.fc = nn.Linear(2, 2) + + def forward(self, x): + return self.fc(x) + + def relu_hook(module, input): + return nn.functional.relu(input[0]) + + def gelu_hook(module, _input, output): + return nn.functional.gelu(output) + + def celu_hook(module, _input, output): + return (nn.functional.celu(output[0]),) + + local_model = DummyTestModel() + ddp_model = DummyTestModel() + local_model.fc.register_forward_pre_hook(relu_hook) + local_model.fc.register_forward_hook(gelu_hook) + ddp_model.fc.register_forward_pre_hook(relu_hook) + ddp_model.fc.register_forward_hook(gelu_hook) + local_model.fc.register_backward_hook(celu_hook) + ddp_model.fc.register_backward_hook(celu_hook) + ddp_model = DistributedDataParallel( + ddp_model.to(self.rank), device_ids=[self.rank] + ) + input_data = torch.rand(5, 2) + output_local = local_model(input_data) + output_ddp = ddp_model(input_data.to(self.rank)) + self.assertEqual(output_local, output_ddp) + output_local.sum().backward() + output_ddp.sum().backward() + ddp_grads = [p.grad for p in ddp_model.parameters()] + self.assertEqual(ddp_grads[0], local_model.fc.weight.grad) + self.assertEqual(ddp_grads[1], local_model.fc.bias.grad) + + def _test_hook_pickling(self, hook, hook_state): + torch.manual_seed(0) + learning_rate = 0.01 + chkpt_file = tempfile.gettempdir() + "/checkpoint.pt" + rank = self.rank + + input = torch.randn(7, 1, device=rank) + target = torch.randn(7, 5, device=rank) + net = torch.nn.Linear(1, 
5).to(rank) + ddp_model = DistributedDataParallel(copy.deepcopy(net), device_ids=[rank]) + dummy_ddp_model = DistributedDataParallel( + copy.deepcopy(net), device_ids=[rank] + ) + optimizer = torch.optim.SGD(ddp_model.parameters(), lr=learning_rate) + ddp_model.register_comm_hook(hook_state, hook) + ddp_model.train() + + for _ in range(10): + optimizer.zero_grad() + out = ddp_model(input) + loss = F.mse_loss(out, target) + loss.backward() + optimizer.step() + + state = { + "state_dict": ddp_model.state_dict(), + "comm_hook": hook, + "comm_hook_state": hook_state, + } + + if rank == 0: + with self.assertLogs("torch.distributed") as captured: + torch.save(state, chkpt_file) + + # Check that the logger has only one entry + self.assertEqual(len(captured.records), 1) + # Check that the logger has an expected entry + self.assertEqual( + captured.records[0].getMessage(), + "NOTE: Process group is not serializable and excluded from a saved state.", + ) + + dist.barrier() + map_location = {"cuda:%d" % 0: "cuda:%d" % rank} + with self.assertLogs("torch.distributed") as captured: + checkpoint = torch.load(chkpt_file, map_location=map_location) + + # Check that the logger has only one entry + self.assertEqual(len(captured.records), 1) + # Check that the logger has an expected entry + self.assertEqual( + captured.records[0].getMessage(), + "NOTE: Process group will be set to a default group (i.e. 
the world size).\ + If a different group is desired, please set `self.process_group` after PowerSGD state is loaded.", + ) + + dummy_ddp_model.load_state_dict(checkpoint["state_dict"]) + dummy_hook = checkpoint["comm_hook"] + dummy_hook_state = checkpoint["comm_hook_state"] + dummy_optimizer = torch.optim.SGD( + dummy_ddp_model.parameters(), lr=learning_rate + ) + + # Check that loaded function is correct + self.assertEqual(dummy_hook.__qualname__, hook.__qualname__) + + # Check that all slots' keys were restored correctly + self.assertEqual(hook_state.__slots__, dummy_hook_state.__slots__) + + # Check that all slots' attributes are restored correctly + # Excluding ``process_group`` and ``rng``. + for entry in dummy_hook_state.__slots__: + if entry != "process_group" and entry != "rng": + self.assertEqual( + getattr(dummy_hook_state, entry), getattr(hook_state, entry) + ) + + # Check that ``process_group`` was set to default + self.assertEqual(dummy_hook_state.process_group, _get_default_group()) + + # Check that a random state was restored properly: + # ``np.random.RandomState.get_state`` returns a tuple with entries: + # ``bit_generator`` - str, + # ``state.key`` - ndarray dtype[uint32], + # ``state.pos`` - int, + # ``has_gauss`` - int, + # ``gauss`` - float + # (refer to https://github.com/numpy/numpy/blob/266aad7478bc7fbcc55eea7f942a0d373b838396/numpy/random/mtrand.pyi) + # To make sure random state was restored properly, all entries should equal the original + for entry1, entry2 in zip( + hook_state.rng.get_state(), dummy_hook_state.rng.get_state() + ): + np.testing.assert_array_equal(entry1, entry2) + + dummy_ddp_model.register_comm_hook(dummy_hook_state, dummy_hook) + dummy_ddp_model.train() + + for _ in range(10): + optimizer.zero_grad() + dummy_optimizer.zero_grad() + out_origin = ddp_model(input) + out_dummy = dummy_ddp_model(input) + loss_origin = F.mse_loss(out_origin, target) + loss_dummy = F.mse_loss(out_dummy, target) + loss_origin.backward() + 
loss_dummy.backward() + optimizer.step() + dummy_optimizer.step() + + # Check that gradients after 10 epochs are the same + for orig_param, dummy_param in zip( + ddp_model.parameters(), dummy_ddp_model.parameters() + ): + self.assertEqual(orig_param.grad, dummy_param.grad) + + dist.barrier() + if rank == 0: + os.remove(chkpt_file) + + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["cuda"], + f"The {BACKEND} backend does not support DDP communication hook on CUDA devices", + ) + @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"])) + @skip_but_pass_in_sandcastle_if( + True, "Skipped due to flakiness" + ) + def test_ddp_hook_pickling_powerSGD(self): + + hook = powerSGD.powerSGD_hook + powersgd_state = powerSGD.PowerSGDState( + process_group=None, + matrix_approximation_rank=1, + start_powerSGD_iter=4, + ) + self._test_hook_pickling(hook, powersgd_state) + + @require_backend_is_available(DistTestCases.backend_feature["gpu"]) + @skip_if_lt_x_gpu(2) + def test_ddp_device_mesh_initialization(self): + """ + Test DDP with device_mesh initialization. + """ + world_size = int(os.environ["WORLD_SIZE"]) + + from torch.distributed.device_mesh import init_device_mesh + device_mesh = init_device_mesh("cuda", (world_size,)) + + pg = _get_default_group() + + torch.cuda.set_device(self.rank) + model = TwoLinLayerNet().cuda() + ddp_model = torch.nn.parallel.DistributedDataParallel(model, device_mesh=device_mesh) + self.assertEqual(ddp_model.device_mesh, device_mesh) + + with self.assertRaisesRegex( + RuntimeError, "Cannot specify both process_group and device_mesh arguments." 
+ ): + ddp_model = torch.nn.parallel.DistributedDataParallel( + model, process_group=pg, device_mesh=device_mesh + ) + + with self.assertRaisesRegex( + RuntimeError, "Only 1D device mesh is supported," + ): + device_mesh = init_device_mesh("cuda", (2, world_size // 2)) + ddp_model = torch.nn.parallel.DistributedDataParallel( + model, device_mesh=device_mesh + ) + + + @skip_if_lt_x_gpu(2) + @require_world_size(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_compile_static_graph(self): + "Tests that DDP works with torch compile when static_graph=True" + model = torch.nn.Linear(10, 10).cuda(self.rank) + model_clone = copy.deepcopy(model) + ddp = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[self.rank], + ) + ddp_static = torch.nn.parallel.DistributedDataParallel( + model_clone, + device_ids=[self.rank], + static_graph=True + ) + ddp = torch.compile(ddp) + ddp_static = torch.compile(ddp_static) + input = torch.rand(10, 10).cuda(self.rank) + # verify output and gradient parity + for _ in range(6): + out_ddp = ddp(input).sum() + out_ddp_static = ddp_static(input).sum() + self.assertEqual(out_ddp, out_ddp_static) + out_ddp.backward() + out_ddp_static.backward() + for p1, p2 in zip(ddp.parameters(), ddp_static.parameters()): + self.assertEqual(p1.grad, p2.grad) + + @skip_if_lt_x_gpu(2) + @require_world_size(2) + @skip_but_pass_in_sandcastle_if( + BACKEND not in DistTestCases.backend_feature["ddp"], + f"The {BACKEND} backend does not support DistributedDataParallel", + ) + def test_ddp_sink_noclone(self): + "Tests that we can configure DDP to avoid clone" + + class OpPatcher(TorchDispatchMode): + def __torch_dispatch__(self, func, types, args=(), kwargs=None): + func_packet = func._overloadpacket + if func_packet == torch.ops.aten.clone: + raise RuntimeError("clone encountered!") + kwargs = kwargs if kwargs else {} + return 
func(*args, **kwargs) + + class MyModel(torch.nn.Module): + def __init__(self) -> None: + super().__init__() + self.fc = torch.nn.Linear(10, 10) + + def forward(self, input): + return self.fc(input) + + model = MyModel().cuda(self.rank) + ddp = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[self.rank], + find_unused_parameters=True, + ) + ddp._set_ddp_sink_clone(False) + input = torch.rand(10, 10).cuda(self.rank) + + with OpPatcher() as patcher: + ddp(input).sum().backward() + + + +instantiate_parametrized_tests(DistributedTest._DistTestBase) diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/fake_pg.py b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/fake_pg.py new file mode 100644 index 0000000000000000000000000000000000000000..ff4cbe56abc9edf395f6f2cf8f98c59586cd07c6 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/fake_pg.py @@ -0,0 +1,31 @@ +# mypy: allow-untyped-defs + +import torch.distributed as dist + +from torch._C._distributed_c10d import ( + FakeProcessGroup, +) + + +class FakeStore(dist.Store): + """ + A fake store is a fake Key-Value store simply for initialization usage + the of fake process group, one can either use FakeStore or HashStore. + """ + + +def _create_fake_pg(prefix_store, rank, world_size, timeout): + """ + A fake process group (not related to FakeTensor) is a process group which + doesn't actually do any communication, it just hallucinates some + communication. You can run a single rank with a fake process group + without needing multiple processes (simulates per-rank behavior) + + NOTE: This is not a real process group, and it would produce wrong results + for every collective. It should be used as a convinient tool when playing + with distributed but don't care about the actual data. 
+ """ + return FakeProcessGroup(rank, world_size) + + +dist.Backend.register_backend("fake", _create_fake_pg, devices=['cpu', 'cuda']) diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/multi_threaded_pg.py b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/multi_threaded_pg.py new file mode 100644 index 0000000000000000000000000000000000000000..e9984ba354cee1daa7a5db1b3daaafe7256ce077 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/multi_threaded_pg.py @@ -0,0 +1,543 @@ +# mypy: allow-untyped-defs + +import sys +import threading +from dataclasses import dataclass +from typing import Dict, List, Optional, Tuple, Union +from functools import partial, reduce + +import torch +import torch.distributed as dist +import weakref +from torch._C._distributed_c10d import ( + _create_work_from_future, + AllgatherOptions, + AllreduceOptions, + AllToAllOptions, + BarrierOptions, + BroadcastOptions, + ReduceScatterOptions, + ScatterOptions, + Store, + ReduceOp, +) +from torch.distributed.distributed_c10d import _CollOp, _store_based_barrier, P2POp +from torch.futures import Future +from torch.utils import _pytree as pytree + +""" +TODO: +Lots of missing collectives. +Collectives validation. +Make timeout robust by making collectives respect the test deadline. +Make tests robust by making collectives interruptible. +We need some synchronization around cleanup to ensure that timedout ranks don't cause spurious failures. 
+ +""" + + +def flatten_list(lst): + return pytree.tree_leaves(lst) + + +def ret_work(ret): + fut = Future() + fut.set_result(ret) + return _create_work_from_future(fut) + +def binop_reduce(tensors, op): + res = op(torch.stack(tensors), dim=0) + if isinstance(res, torch.Tensor): + return res + # min/max return a namedtuple + return res.values + +def bitwise_reduce(tensors, op): + return reduce(op, tensors) + +_reduce_ops = { + ReduceOp.SUM: partial(binop_reduce, op=torch.sum), + ReduceOp.AVG: partial(binop_reduce, op=torch.mean), + ReduceOp.PRODUCT: partial(binop_reduce, op=torch.prod), + ReduceOp.MIN: partial(binop_reduce, op=torch.min), + ReduceOp.MAX: partial(binop_reduce, op=torch.max), + ReduceOp.BAND: partial(bitwise_reduce, op=torch.bitwise_and), + ReduceOp.BOR: partial(bitwise_reduce, op=torch.bitwise_or), + ReduceOp.BXOR: partial(bitwise_reduce, op=torch.bitwise_xor), +} + +class AllToAll: + @torch.no_grad() + def work(self, data): + world_size = len(data) + for dest_rank in range(world_size): + output_tensor_list, _ = data[dest_rank] + for src_rank in range(world_size): + _, input_tensor_list = data[src_rank] + output_tensor_list[src_rank].copy_(input_tensor_list[dest_rank]) + +class AllToAllBase: + @torch.no_grad() + def work(self, data): + world_size = len(data) + for dest_rank in range(world_size): + output_buffer, _, output_split_sizes, _ = data[dest_rank] + + output_indexes = self._size_cumsum(output_buffer.size(0), output_split_sizes, world_size) + + for src_rank in range(world_size): + _, input_buffer, _, input_split_sizes = data[src_rank] + input_indexes = self._size_cumsum(input_buffer.size(0), input_split_sizes, world_size) + + output_buffer[output_indexes[src_rank]:output_indexes[src_rank + 1]].copy_( + input_buffer[input_indexes[dest_rank]:input_indexes[dest_rank + 1]] + ) + + def _size_cumsum(self, buf_size: int, sizes: Union[torch.Tensor, List[int], None], world_size: int) -> torch.Tensor: + if sizes is None or len(sizes) == 0: + sizes = 
torch.full( + (world_size,), buf_size // world_size, dtype=torch.int64 + ) + if not isinstance(sizes, torch.Tensor): + sizes = torch.tensor(sizes, dtype=torch.int64) + assert sizes.dtype == torch.int64 + sizes = torch.cumsum( + torch.cat( + ( + torch.tensor([0], dtype=torch.int64, device=sizes.device), sizes + ), + dim=0 + ), + dim=0 + ) + return sizes + +class AllReduce: + def __init__(self, op): + if op.op not in _reduce_ops: + raise NotImplementedError( + f"AllReduce op {op.op} not supported on multithreaded pg for now." + ) + self.op = op.op + + @torch.no_grad() + def work(self, data): + for i in range(len(data[0])): + tensors = [] + # use rank0 as the device for sum + rank_0_device = data[0][i].device + # collect all data to the list and make them + # all on rank 0 device + for src_rank in range(0, len(data)): + tensors.append(data[src_rank][i].to(rank_0_device)) + + # now mimic reduce across all ranks + res = _reduce_ops[self.op](tensors) + + # copy all the reduced value to each rank + for src_rank in range(len(data)): + data[src_rank][i].copy_(res.to(data[src_rank][i].device)) + + +class AllGather: + @torch.no_grad() + def work(self, data): + for src_rank in range(len(data)): + in_tensor_list = data[src_rank][1] + # Can't handle all_gather with multiple tensors + assert len(in_tensor_list) == 1 + src_tensor = in_tensor_list[0] + + for dest in data: + dest_tensor = dest[0][0][src_rank] + dest_tensor.copy_(src_tensor) + + +class Scatter: + def __init__(self, src): + self.src = src + + @torch.no_grad() + def work(self, data): + src_in_tensor_list = data[self.src][1] + # Can't handle scatter with multiple input tensor list + assert len(src_in_tensor_list) == 1 + src_in_tensors = src_in_tensor_list[0] + + for rank, each_rank_data in enumerate(data): + out_tensor_list = each_rank_data[0] + # Can't handle scatter with multiple output tensor + assert len(out_tensor_list) == 1 + dest_tensor = out_tensor_list[0] + dest_tensor.copy_(src_in_tensors[rank]) + + +class 
Gather: + def __init__(self, dst): + self.dst = dst + + @torch.no_grad() + def work(self, data): + # Can't handle gather with multiple tensor lists + assert len(data[self.dst][0]) == 1 + out_tensor_list = data[self.dst][0][0] + for rank, each_rank_data in enumerate(data): + src_in_tensor_list = each_rank_data[1] + # Can't handle gather with multiple tensor lists + assert len(src_in_tensor_list) == 1 + dest_tensor = out_tensor_list[rank] + dest_tensor.copy_(src_in_tensor_list[0]) + +class ReduceScatter: + def __init__(self, op): + if op != dist.ReduceOp.SUM and op != dist.ReduceOp.AVG: + raise NotImplementedError(f"ReduceScatter does not support {op}") + self.op = op + + @torch.no_grad() + def work(self, data): + start_reduction = [False for _ in range(len(data))] + for each_rank_data in data: + # Can't handle reduce_scatter with multiple scatter list + assert len(each_rank_data[1]) == 1 + to_scatter = each_rank_data[1][0] + for i in range(len(to_scatter)): + dest_tensor_on_rank_i = data[i][0] + # Can't handle reduce_scatter with multiple output tensor + assert len(dest_tensor_on_rank_i) == 1 + dst_tensor_device = dest_tensor_on_rank_i[0].device + if not start_reduction[i]: + dest_tensor_on_rank_i[0].copy_(to_scatter[i].to(dst_tensor_device)) + start_reduction[i] = True + else: + dest_tensor_on_rank_i[0].add_(to_scatter[i].to(dst_tensor_device)) + if self.op == dist.ReduceOp.AVG: + num_ranks = len(data) + for each_rank_data in data: + each_rank_data[0][0] /= num_ranks + + +class Broadcast: + def __init__(self, src): + self.src = src + + @torch.no_grad() + def work(self, data): + in_tensor_list = flatten_list(data[self.src]) + for i in range(len(data)): + out_tensor_list = flatten_list(data[i]) + for j in range(len(in_tensor_list)): + out_tensor_list[j].copy_(in_tensor_list[j]) + + +class Collective: + def __init__(self, world_size, collective, pg): + self._world_size = world_size + self._collective = collective + + self._start_cond = threading.Condition() + 
self._done_cond = threading.Condition() + + self._data = [None] * world_size + self._count = 0 + self._done = False + + self._pg = pg + + def join(self, rank, data): + with self._start_cond: + self._data[rank] = data + self._count += 1 + + # notify rank 0 + if self._count == self._world_size: + if rank > 0: + self._start_cond.notify() + + if rank == 0: + self._start_cond.wait_for( + lambda: self._count == self._world_size or self._pg._terminate.is_set() + ) + # SystemExit is not a subclass of Exception but BaseException + # and can be distinguished from normal exception raised from program errors + # so that we can hide it from the exception queue + if self._pg._terminate.is_set(): + sys.exit("Test termination event occurs.") + + with self._done_cond: + # wait for rank 0 to finish + if rank > 0: + self._done_cond.wait_for(lambda: self._done or self._pg._terminate.is_set()) + if self._pg._terminate.is_set(): + sys.exit("Test termination event occurs.") + else: + # copy data around + self._collective.work(self._data) + self._done = True + self._done_cond.notify_all() + return ret_work(data) + + +class ProcessLocalGroup(dist.ProcessGroup): + _coll_lock = threading.Lock() + _cur_coll_on_pgs = {} + + _terminate = threading.Event() + + @classmethod + def _start_coll(cls, collective, pg): + with cls._coll_lock: + # pg_name is unique, we use that to record the mapping between pg and collective + if pg.pg_name not in cls._cur_coll_on_pgs: + cls._cur_coll_on_pgs[pg.pg_name] = Collective(pg.size(), collective, cls) + return cls._cur_coll_on_pgs[pg.pg_name] + + @classmethod + def _end_coll(cls, collective, pg): + # This is racily called by all ranks, so only one will work + with cls._coll_lock: + if pg.pg_name in cls._cur_coll_on_pgs and cls._cur_coll_on_pgs[pg.pg_name] == collective: + cls._cur_coll_on_pgs.pop(pg.pg_name) + + @classmethod + def exception_handle(cls, exc): + cls._terminate.set() + for coll in cls._cur_coll_on_pgs.values(): + with coll._start_cond: + 
coll._start_cond.notify() + with coll._done_cond: + coll._done_cond.notify_all() + + @classmethod + def reset(cls): + with cls._coll_lock: + cls._cur_coll_on_pgs = {} + cls._terminate.clear() + + def alltoall_base( + self, + output_buffer: torch.Tensor, + input_buffer: torch.Tensor, + output_split_sizes: Optional[List[int]], + input_split_sizes: Optional[List[int]], + opts=AllToAllOptions() + ) -> torch.Tensor: + coll = ProcessLocalGroup._start_coll(AllToAllBase(), self) + res = coll.join(self._rank, (output_buffer, input_buffer, output_split_sizes, input_split_sizes)) + ProcessLocalGroup._end_coll(coll, self) + return res + + def alltoall(self, output_tensor_list, input_tensor_list, opts=AllToAllOptions()): + coll = ProcessLocalGroup._start_coll(AllToAll(), self) + res = coll.join(self._rank, (output_tensor_list, input_tensor_list)) + ProcessLocalGroup._end_coll(coll, self) + return res + + def allreduce(self, tensor_list, opts=AllreduceOptions()): + coll = ProcessLocalGroup._start_coll(AllReduce(opts.reduceOp), self) + res = coll.join(self._rank, tensor_list) + ProcessLocalGroup._end_coll(coll, self) + return res + + def allreduce_coalesced(self, tensor_list, opts=AllreduceOptions()): + coll = ProcessLocalGroup._start_coll(AllReduce(opts.reduceOp), self) + res = coll.join(self._rank, tensor_list) + ProcessLocalGroup._end_coll(coll, self) + return res + + def barrier(self, opts=BarrierOptions()): + return self.allreduce(tensor_list=[torch.ones(1)]) + + def allgather(self, output_tensors, input_tensor, opts=AllgatherOptions()): + coll = ProcessLocalGroup._start_coll(AllGather(), self) + res = coll.join(self._rank, (output_tensors, input_tensor)) + ProcessLocalGroup._end_coll(coll, self) + return res + + def _allgather_base(self, output_tensor, input_tensor, opts=AllgatherOptions()): + tensor_list = list(torch.chunk(output_tensor, self._world_size)) + return self.allgather([tensor_list], [input_tensor], opts) + + def broadcast(self, tensor_list, 
opts=BroadcastOptions()): + coll = ProcessLocalGroup._start_coll(Broadcast(opts.rootRank), self) + res = coll.join(self._rank, tensor_list) + ProcessLocalGroup._end_coll(coll, self) + return res + + def scatter(self, output_tensors, input_tensors, opts=ScatterOptions()): + coll = ProcessLocalGroup._start_coll(Scatter(opts.rootRank), self) + res = coll.join(self._rank, (output_tensors, input_tensors)) + ProcessLocalGroup._end_coll(coll, self) + return res + + def gather(self, output_tensors, input_tensors, opts=ScatterOptions()): + coll = ProcessLocalGroup._start_coll(Gather(opts.rootRank), self) + res = coll.join(self._rank, (output_tensors, input_tensors)) + ProcessLocalGroup._end_coll(coll, self) + return res + + def reduce_scatter(self, output_tensor, scatter_list, opts=ReduceScatterOptions()): + coll = ProcessLocalGroup._start_coll(ReduceScatter(opts.reduceOp), self) + res = coll.join(self._rank, (output_tensor, scatter_list)) + ProcessLocalGroup._end_coll(coll, self) + return res + + def _reduce_scatter_base(self, output_tensor, input_tensor, opts=ReduceScatterOptions()): + tensor_list = list(torch.chunk(input_tensor, self._world_size)) + return self.reduce_scatter([output_tensor], [tensor_list], opts) + + def reduce_scatter_tensor_coalesced(self, output_tensors, input_tensors, opts=ReduceScatterOptions()): + works = [ + self._reduce_scatter_base(output_tensor, input_tensor, opts) + for output_tensor, input_tensor + in zip(output_tensors, input_tensors) + ] + for work in works[:-1]: + work.wait() + return works[-1] + + def allgather_into_tensor_coalesced(self, output_tensor_list, input_tensor_list, opts=AllgatherOptions()): + res = None + for o_t, i_t in zip(output_tensor_list, input_tensor_list): + res = self._allgather_base(o_t, i_t) + return res + + def __init__(self, rank, world_size): + super().__init__(rank, world_size) + self._rank = rank + self._world_size = world_size + world = dist.distributed_c10d._world + if isinstance(world, ThreadLocalWorld): + 
world = world._get_world() + self._world = weakref.ref(world) + self._ctx = torch.autograd.set_multithreading_enabled(False) + + def size(self): + return self._world_size + + @property + def pg_name(self): + """ + return the global registered name of the current pg in the world + """ + return self._world().pg_names[self] + + @property + def group_name(self): + return self.pg_name + + def getBackendName(self): + return "threaded" + + def __repr__(self): + return f"ThreadedPG world_size:{self._world_size} rank:{self._rank}" + + +def _create_threaded_pg(prefix_store, rank, world_size, timeout): + pg = ProcessLocalGroup(rank, world_size) + # https://github.com/pytorch/pytorch/pull/103033 changed store based barrier to optional + # When device mesh involves sub groups while store based barrier is not enabled in c10d, + # even though threaded pg actual collectives are assumed to be single threaded, + # different threads may be initializing different groups, + # leading to race conditions. + # For example, if we have a mesh of [[0, 1], [2, 3]], the sub groups + # (dim 0 and 1) would be initialized in different threads independently. + # In this case we can no longer rely on class or global variables + # but have to rely on store based barrier to make sure each group + # is ready separately before we can invoke collectives in any of the groups. 
+ + # the prefix store is already per group so we pass an empty name here + _store_based_barrier(rank, prefix_store, "", world_size, timeout) + return pg + + +dist.Backend.register_backend("threaded", _create_threaded_pg, devices=["cpu", "cuda"]) + + +@dataclass +class WorldData: + default_pg: dist.ProcessGroup + pg_map: Dict[dist.ProcessGroup, Tuple[str, Optional[Store]]] + pg_names: Dict[dist.ProcessGroup, str] + pg_group_ranks: Dict[dist.ProcessGroup, Dict[int, int]] + pg_backend_config: Dict[dist.ProcessGroup, str] + group_count: int + tags_to_pg: Dict[str, List[dist.ProcessGroup]] + pg_to_tag: Dict[dist.ProcessGroup, str] + pg_coalesce_state: Dict[dist.ProcessGroup, List[Union[_CollOp, P2POp]]] + pg_default_device: Dict[dist.ProcessGroup, torch.device] + + +class ThreadLocalWorld: + _world = threading.local() + + def _get_world(self) -> WorldData: + if not hasattr(ThreadLocalWorld._world, "world"): + ThreadLocalWorld._world.world = WorldData(None, {}, {}, {}, {}, 0, {}, {}, {}, {}) + return ThreadLocalWorld._world.world + + @property + def default_pg(self): + return self._get_world().default_pg + + @default_pg.setter + def default_pg(self, value): + self._get_world().default_pg = value + + @property + def pg_map(self): + return self._get_world().pg_map + + @property + def pg_names(self): + return self._get_world().pg_names + + @property + def pg_group_ranks(self): + return self._get_world().pg_group_ranks + + @property + def pg_backend_config(self): + return self._get_world().pg_backend_config + + @property + def group_count(self) -> int: + return self._get_world().group_count + + @group_count.setter + def group_count(self, value): + self._get_world().group_count = value + + @property + def tags_to_pg(self): + return self._get_world().tags_to_pg + + @property + def pg_to_tag(self): + return self._get_world().pg_to_tag + + @property + def pg_coalesce_state(self) -> Dict[dist.ProcessGroup, List[Union[_CollOp, P2POp]]]: + return 
self._get_world().pg_coalesce_state + + @property + def pg_default_device(self) -> Dict[dist.ProcessGroup, torch.device]: + return self._get_world().pg_default_device + + +_old_pg_world = None +_ctx_manager = None + + +def _install_threaded_pg(): + global _old_pg_world + global _ctx_manager + _old_pg_world = dist.distributed_c10d._world + dist.distributed_c10d._world = ThreadLocalWorld() + _ctx_manager = torch.autograd.set_multithreading_enabled(False) + + return dist.distributed_c10d._world + + +def _uninstall_threaded_pg(): + dist.distributed_c10d._world = _old_pg_world diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/__init__.py b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/api/__init__.py b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/api/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/api/__pycache__/__init__.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/api/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..de8be4db96d2059154d64a14164031d353e684de Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/api/__pycache__/__init__.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc_utils.py b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc_utils.py new file mode 100644 index 
0000000000000000000000000000000000000000..9b714c77aa998ea68f3bb5a95ed03d3beb802f3a --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc_utils.py @@ -0,0 +1,181 @@ +# mypy: allow-untyped-defs + +import os +import sys +import unittest +from typing import Dict, List, Type + +from torch.testing._internal.common_distributed import MultiProcessTestCase +from torch.testing._internal.common_utils import ( + TEST_WITH_DEV_DBG_ASAN, + find_free_port, + IS_SANDCASTLE, +) +from torch.testing._internal.distributed.ddp_under_dist_autograd_test import ( + CudaDdpComparisonTest, + DdpComparisonTest, + DdpUnderDistAutogradTest, +) +from torch.testing._internal.distributed.nn.api.remote_module_test import ( + CudaRemoteModuleTest, + RemoteModuleTest, + ThreeWorkersRemoteModuleTest, +) +from torch.testing._internal.distributed.rpc.dist_autograd_test import ( + DistAutogradTest, + CudaDistAutogradTest, + FaultyAgentDistAutogradTest, + TensorPipeAgentDistAutogradTest, + TensorPipeCudaDistAutogradTest +) +from torch.testing._internal.distributed.rpc.dist_optimizer_test import ( + DistOptimizerTest, +) +from torch.testing._internal.distributed.rpc.jit.dist_autograd_test import ( + JitDistAutogradTest, +) +from torch.testing._internal.distributed.rpc.jit.rpc_test import JitRpcTest +from torch.testing._internal.distributed.rpc.jit.rpc_test_faulty import ( + JitFaultyAgentRpcTest, +) +from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import ( + RpcAgentTestFixture, +) +from torch.testing._internal.distributed.rpc.faulty_agent_rpc_test import ( + FaultyAgentRpcTest, +) +from torch.testing._internal.distributed.rpc.rpc_test import ( + CudaRpcTest, + RpcTest, + TensorPipeAgentRpcTest, + TensorPipeAgentCudaRpcTest, +) +from torch.testing._internal.distributed.rpc.examples.parameter_server_test import ParameterServerTest +from torch.testing._internal.distributed.rpc.examples.reinforcement_learning_rpc_test import ( + 
ReinforcementLearningRpcTest, +) + + +def _check_and_set_tcp_init(): + # if we are running with TCP init, set main address and port + # before spawning subprocesses, since different processes could find + # different ports. + use_tcp_init = os.environ.get("RPC_INIT_WITH_TCP", None) + if use_tcp_init == "1": + os.environ["MASTER_ADDR"] = '127.0.0.1' + os.environ["MASTER_PORT"] = str(find_free_port()) + +def _check_and_unset_tcp_init(): + use_tcp_init = os.environ.get("RPC_INIT_WITH_TCP", None) + if use_tcp_init == "1": + del os.environ["MASTER_ADDR"] + del os.environ["MASTER_PORT"] + +# The tests for the RPC module need to cover multiple possible combinations: +# - different aspects of the API, each one having its own suite of tests; +# - different agents (ProcessGroup, TensorPipe, ...); +# To avoid a combinatorial explosion in code size, and to prevent forgetting to +# add a combination, these are generated automatically by the code in this file. +# Here, we collect all the test suites that we need to cover. +# We then have one separate file for each agent, from which +# we call the generate_tests function of this file, passing to it a fixture for +# the agent, which then gets mixed-in with each test suite. + +@unittest.skipIf( + TEST_WITH_DEV_DBG_ASAN, "Skip ASAN as torch + multiprocessing spawn have known issues" +) +class SpawnHelper(MultiProcessTestCase): + def setUp(self): + super().setUp() + _check_and_set_tcp_init() + self._spawn_processes() + + def tearDown(self): + _check_and_unset_tcp_init() + super().tearDown() + + +# This list contains test suites that are agent-agnostic and that only verify +# compliance with the generic RPC interface specification. These tests should +# *not* make use of implementation details of a specific agent (options, +# attributes, ...). These test suites will be instantiated multiple times, once +# for each agent (except the faulty agent, which is special). 
+GENERIC_TESTS = [ + RpcTest, + ParameterServerTest, + DistAutogradTest, + DistOptimizerTest, + JitRpcTest, + JitDistAutogradTest, + RemoteModuleTest, + ThreeWorkersRemoteModuleTest, + DdpUnderDistAutogradTest, + DdpComparisonTest, + ReinforcementLearningRpcTest, +] +GENERIC_CUDA_TESTS = [ + CudaRpcTest, + CudaDistAutogradTest, + CudaRemoteModuleTest, + CudaDdpComparisonTest, +] + + +# This list contains test suites that will only be run on the TensorPipeAgent. +# These suites should be standalone, and separate from the ones in the generic +# list (not subclasses of those!). +TENSORPIPE_TESTS = [ + TensorPipeAgentRpcTest, + TensorPipeAgentDistAutogradTest, +] +TENSORPIPE_CUDA_TESTS = [ + TensorPipeAgentCudaRpcTest, + TensorPipeCudaDistAutogradTest, +] + + +# This list contains test suites that will only be run on the faulty RPC agent. +# That agent is special as it's only used to perform fault injection in order to +# verify the error handling behavior. Thus the faulty agent will only run the +# suites in this list, which were designed to test such behaviors, and not the +# ones in the generic list. +FAULTY_AGENT_TESTS = [ + FaultyAgentRpcTest, + FaultyAgentDistAutogradTest, + JitFaultyAgentRpcTest, +] + + +def generate_tests( + prefix: str, + mixin: Type[RpcAgentTestFixture], + tests: List[Type[RpcAgentTestFixture]], + module_name: str, +) -> Dict[str, Type[RpcAgentTestFixture]]: + """Mix in the classes needed to autogenerate the tests based on the params. + + Takes a series of test suites, each written against a "generic" agent (i.e., + derived from the abstract RpcAgentTestFixture class), as the `tests` args. + Takes a concrete subclass of RpcAgentTestFixture, which specializes it for a + certain agent, as the `mixin` arg. Produces all combinations of them. + Returns a dictionary of class names to class type + objects which can be inserted into the global namespace of the calling + module. 
The name of each test will be a concatenation of the `prefix` arg + and the original name of the test suite. + The `module_name` should be the name of the calling module so + that the classes can be fixed to make it look like they belong to it, which + is necessary for pickling to work on them. + """ + ret: Dict[str, Type[RpcAgentTestFixture]] = {} + for test_class in tests: + if IS_SANDCASTLE and TEST_WITH_DEV_DBG_ASAN: + print( + f'Skipping test {test_class} on sandcastle for the following reason: ' + 'Skip dev-asan as torch + multiprocessing spawn have known issues', file=sys.stderr) + continue + + name = f"{prefix}{test_class.__name__}" + class_ = type(name, (test_class, mixin, SpawnHelper), {}) + class_.__module__ = module_name + ret[name] = class_ + return ret diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/generated/annotated_fn_args.py b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/generated/annotated_fn_args.py new file mode 100644 index 0000000000000000000000000000000000000000..7e81cee5801edfe3d7fe6cbb7c0a16985d2c92ef --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/generated/annotated_fn_args.py @@ -0,0 +1,2868 @@ +""" +This file is needed for generating procedural tests required for +testing __torch_function__. See tests/test_overrides.py. 
+""" + +# flake8: noqa +import torch + +annotated_args = { + torch._C._VariableFunctions._cast_Byte: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._cast_Char: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._cast_Double: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._cast_Float: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._cast_Int: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._cast_Long: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._cast_Short: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._cast_Half: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._make_dual: [{'is_kwarg_only': 'False', 'name': 'primal', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tangent', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'level', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions._unpack_dual: [{'is_kwarg_only': 'False', 'name': 'dual', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'level', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.align_tensors: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._assert_async: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._assert_async: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'assert_msg', 'simple_type': 'c10::string_view'}], + torch._C._VariableFunctions._assert_scalar: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 
'name': 'assert_msg', 'simple_type': 'c10::string_view'}], + torch._C._VariableFunctions._functional_assert_scalar: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'assert_msg', 'simple_type': 'c10::string_view'}, {'is_kwarg_only': 'False', 'name': 'dep_token', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._functional_assert_async: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'assert_msg', 'simple_type': 'c10::string_view'}, {'is_kwarg_only': 'False', 'name': 'dep_token', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._assert_tensor_metadata: [{'is_kwarg_only': 'False', 'name': 'a', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._print: [{'is_kwarg_only': 'False', 'name': 's', 'simple_type': 'c10::string_view'}], + torch._C._VariableFunctions.sym_constrain_range: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.sym_constrain_range_for_size: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions._functional_sym_constrain_range: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'min', 'simple_type': 'int64_t?'}, {'is_kwarg_only': 'False', 'name': 'max', 'simple_type': 'int64_t?'}, {'is_kwarg_only': 'False', 'name': 'dep_token', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._functional_sym_constrain_range_for_size: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'min', 'simple_type': 'int64_t?'}, {'is_kwarg_only': 'False', 'name': 'max', 'simple_type': 'int64_t?'}, {'is_kwarg_only': 'False', 'name': 'dep_token', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._make_dep_token: [], + torch._C._VariableFunctions._use_cudnn_ctc_loss: [{'is_kwarg_only': 'False', 'name': 'log_probs', 'simple_type': 
'Tensor'}, {'is_kwarg_only': 'False', 'name': 'targets', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input_lengths', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'target_lengths', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'blank', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions._use_cudnn_ctc_loss: [{'is_kwarg_only': 'False', 'name': 'log_probs', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'targets', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input_lengths', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target_lengths', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'blank', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions._cudnn_ctc_loss: [{'is_kwarg_only': 'False', 'name': 'log_probs', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'targets', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input_lengths', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'target_lengths', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'blank', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'deterministic', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'zero_infinity', 'simple_type': 'bool'}], + torch._C._VariableFunctions._cudnn_ctc_loss: [{'is_kwarg_only': 'False', 'name': 'log_probs', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'targets', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input_lengths', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target_lengths', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'blank', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'deterministic', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'zero_infinity', 'simple_type': 'bool'}], + torch._C._VariableFunctions._use_cudnn_rnn_flatten_weight: [], + 
torch._C._VariableFunctions._cudnn_rnn_flatten_weight: [{'is_kwarg_only': 'False', 'name': 'weight_arr', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'weight_stride0', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'input_size', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'mode', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'hidden_size', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'proj_size', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'num_layers', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'batch_first', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'bidirectional', 'simple_type': 'bool'}], + torch._C._VariableFunctions._cudnn_rnn: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'weight_stride0', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'weight_buf', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'hx', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'cx', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'mode', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'hidden_size', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'proj_size', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'num_layers', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'batch_first', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'dropout', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'train', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'bidirectional', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'batch_sizes', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'dropout_state', 'simple_type': 'Tensor?'}], + 
torch._C._VariableFunctions._cudnn_init_dropout_state: [{'is_kwarg_only': 'False', 'name': 'dropout', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'train', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'dropout_seed', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions._debug_has_internal_overlap: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._fused_dropout: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'double'}], + torch._C._VariableFunctions._masked_scale: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mask', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scale', 'simple_type': 'double'}], + torch._C._VariableFunctions.native_dropout: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'train', 'simple_type': 'bool?'}], + torch._C._VariableFunctions._sobol_engine_draw: [{'is_kwarg_only': 'False', 'name': 'quasi', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'sobolstate', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dimension', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'num_generated', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dtype', 'simple_type': 'ScalarType?'}], + torch._C._VariableFunctions._sobol_engine_ff_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'sobolstate', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dimension', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'num_generated', 'simple_type': 
'int64_t'}], + torch._C._VariableFunctions._sobol_engine_scramble_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'ltm', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dimension', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions._sobol_engine_initialize_state_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dimension', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions._reshape_from_tensor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'shape', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._shape_as_tensor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.dropout: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'train', 'simple_type': 'bool'}], + torch._C._VariableFunctions.dropout_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'train', 'simple_type': 'bool'}], + torch._C._VariableFunctions.feature_dropout: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'train', 'simple_type': 'bool'}], + torch._C._VariableFunctions.feature_dropout_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'train', 'simple_type': 'bool'}], + torch._C._VariableFunctions.alpha_dropout: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'double'}, {'is_kwarg_only': 
'False', 'name': 'train', 'simple_type': 'bool'}], + torch._C._VariableFunctions.alpha_dropout_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'train', 'simple_type': 'bool'}], + torch._C._VariableFunctions.feature_alpha_dropout: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'train', 'simple_type': 'bool'}], + torch._C._VariableFunctions.feature_alpha_dropout_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'train', 'simple_type': 'bool'}], + torch._C._VariableFunctions.abs: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.abs: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.abs_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.absolute: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.absolute: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.angle: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.angle: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.view_as_real: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.view_as_complex: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.sgn: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.sgn: [{'is_kwarg_only': 'False', 'name': 'self', 
'simple_type': 'Tensor'}], + torch._C._VariableFunctions.real: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.imag: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._conj: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.conj: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._conj_physical: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.conj_physical: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.conj_physical: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.conj_physical_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.resolve_conj: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.resolve_neg: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._neg_view: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.acos: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.acos: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.acos_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.arccos: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.arccos: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.arccos_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.avg_pool1d: [{'is_kwarg_only': 'False', 'name': 'self', 
'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 1}], + torch._C._VariableFunctions.adaptive_avg_pool1d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'IntArrayRef', 'size': 1}], + torch._C._VariableFunctions.adaptive_max_pool1d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'IntArrayRef', 'size': 1}], + torch._C._VariableFunctions.add: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.add: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._add_relu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._add_relu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._add_relu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions._add_relu_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._add_relu_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.addmv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat', 'simple_type': 'Tensor'}, 
{'is_kwarg_only': 'False', 'name': 'vec', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.addmv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'vec', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.addmv_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'vec', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.addr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'vec1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'vec2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.addr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'vec1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'vec2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.affine_grid_generator: [{'is_kwarg_only': 'False', 'name': 'theta', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'align_corners', 'simple_type': 'bool'}], + torch._C._VariableFunctions._is_all_true: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._is_any_true: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._test_check_tensor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._test_functorch_fallback: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.all: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 
'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.all: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.all: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.all: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.all: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.all: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.all: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.all: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.allclose: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.any: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.any: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.any: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.any: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.any: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.any: [{'is_kwarg_only': 'False', 'name': 'self', 
'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.any: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.any: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.arange: [{'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.arange: [{'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.arange: [{'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.arange: [{'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.arange: [{'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions._dim_arange: [{'is_kwarg_only': 'False', 'name': 'like', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.argmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.argmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.argmin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.argmin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.acosh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.acosh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.acosh_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 
'Tensor'}], + torch._C._VariableFunctions.arccosh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.arccosh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.arccosh_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.asinh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.asinh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.asinh_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.arcsinh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.arcsinh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.arcsinh_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.atanh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.atanh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.atanh_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.arctanh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.arctanh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.arctanh_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.as_strided: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions.as_strided_: [{'is_kwarg_only': 'False', 
'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions.asin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.asin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.asin_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.arcsin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.arcsin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.arcsin_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.atan: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.atan: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.atan_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.arctan: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.arctan: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.arctan_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.atleast_1d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.atleast_1d: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions.atleast_2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.atleast_2d: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}], + 
torch._C._VariableFunctions.atleast_3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.atleast_3d: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions.baddbmm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.baddbmm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.bartlett_window: [{'is_kwarg_only': 'False', 'name': 'window_length', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.bartlett_window: [{'is_kwarg_only': 'False', 'name': 'window_length', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'periodic', 'simple_type': 'bool'}], + torch._C._VariableFunctions.batch_norm: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'running_mean', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'running_var', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'training', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'momentum', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'eps', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'cudnn_enabled', 'simple_type': 'bool'}], + torch._C._VariableFunctions.quantized_batch_norm: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'bias', 
'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'mean', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'var', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'eps', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'output_scale', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'output_zero_point', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions._batch_norm_impl_index: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'running_mean', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'running_var', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'training', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'momentum', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'eps', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'cudnn_enabled', 'simple_type': 'bool'}], + torch._C._VariableFunctions.bernoulli: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.bernoulli: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.bernoulli: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'double'}], + torch._C._VariableFunctions.bilinear: [{'is_kwarg_only': 'False', 'name': 'input1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input2', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.binary_cross_entropy_with_logits: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}], + 
torch._C._VariableFunctions.bincount: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.bitwise_not: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.bitwise_not: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.copysign: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.copysign: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.copysign: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.copysign: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions._lazy_clone: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.logical_not: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.logical_not: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.logical_xor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.logical_xor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.logical_and: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.logical_and: 
[{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.logical_or: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.logical_or: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.blackman_window: [{'is_kwarg_only': 'False', 'name': 'window_length', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.blackman_window: [{'is_kwarg_only': 'False', 'name': 'window_length', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'periodic', 'simple_type': 'bool'}], + torch._C._VariableFunctions.bmm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.bmm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.broadcast_tensors: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions.broadcast_to: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions._sparse_broadcast_to: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'IntArrayRef'}], + torch._C._VariableFunctions.cat: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions.cat: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions.cat: [{'is_kwarg_only': 'False', 'name': 
'tensors', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.cat: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.concat: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions.concat: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions.concat: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.concat: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.concatenate: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions.concatenate: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions.concatenate: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.concatenate: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.block_diag: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions.ceil: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.ceil: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.ceil_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + 
torch._C._VariableFunctions.chain_matmul: [{'is_kwarg_only': 'False', 'name': 'matrices', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions.chain_matmul: [{'is_kwarg_only': 'False', 'name': 'matrices', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions.unsafe_chunk: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'chunks', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.chunk: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'chunks', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.tensor_split: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'sections', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions.tensor_split: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions.tensor_split: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tensor_indices_or_sections', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.clamp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.clamp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.clamp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.clamp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.clamp_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.clamp_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.clamp_max: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 
'max', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.clamp_max: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'max', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.clamp_max: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'max', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.clamp_max: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'max', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.clamp_max_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'max', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.clamp_max_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'max', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.clamp_min: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'min', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.clamp_min: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'min', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.clamp_min: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'min', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.clamp_min: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'min', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.clamp_min_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'min', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.clamp_min_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'min', 
'simple_type': 'Tensor'}], + torch._C._VariableFunctions.clip: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.clip: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.clip: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.clip: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.clip_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.clip_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.cudnn_is_acceptable: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.complex: [{'is_kwarg_only': 'False', 'name': 'real', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'imag', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.complex: [{'is_kwarg_only': 'False', 'name': 'real', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'imag', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.polar: [{'is_kwarg_only': 'False', 'name': 'abs', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'angle', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.polar: [{'is_kwarg_only': 'False', 'name': 'abs', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'angle', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.constant_pad_nd: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'pad', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions.convolution: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'stride', 
'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'transposed', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'output_padding', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'groups', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions._convolution: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'transposed', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'output_padding', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'groups', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'benchmark', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'deterministic', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'cudnn_enabled', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'allow_tf32', 'simple_type': 'bool'}], + torch._C._VariableFunctions._convolution: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'transposed', 
'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'output_padding', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'groups', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'benchmark', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'deterministic', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'cudnn_enabled', 'simple_type': 'bool'}], + torch._C._VariableFunctions._convolution_mode: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'c10::string_view'}, {'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'groups', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions.conv1d: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.conv1d: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.conv2d: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.conv2d: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.conv3d: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.conv3d: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, 
{'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.conv_tbc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.conv_transpose1d: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.conv_transpose2d: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.conv_transpose3d: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._copy_from: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dst', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._copy_from_and_resize: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dst', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.cos: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.cos: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.cos_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.cosh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.cosh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.cosh_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.cosine_embedding_loss: [{'is_kwarg_only': 'False', 'name': 
'input1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input2', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.count_nonzero: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef'}], + torch._C._VariableFunctions.count_nonzero: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.cov: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.corrcoef: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.cudnn_affine_grid_generator: [{'is_kwarg_only': 'False', 'name': 'theta', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'N', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'C', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'H', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'W', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.cudnn_batch_norm: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'running_mean', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'running_var', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'training', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'exponential_average_factor', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'epsilon', 'simple_type': 'double'}], + torch._C._VariableFunctions.cudnn_convolution: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'padding', 
'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'groups', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'benchmark', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'deterministic', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'allow_tf32', 'simple_type': 'bool'}], + torch._C._VariableFunctions.cudnn_convolution: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'groups', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'benchmark', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'deterministic', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'allow_tf32', 'simple_type': 'bool'}], + torch._C._VariableFunctions.cudnn_convolution_transpose: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'output_padding', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'groups', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'benchmark', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'deterministic', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 
'allow_tf32', 'simple_type': 'bool'}], + torch._C._VariableFunctions._mps_convolution_transpose: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'output_padding', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'groups', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions.cudnn_convolution_relu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'groups', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions.cudnn_convolution_add_relu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'z', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'alpha', 'simple_type': 'Scalar?'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'groups', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions.cudnn_grid_sampler: 
[{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'grid', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.cummax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.cummax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.cummax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.cummax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions._cummax_helper: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'values', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.cummin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.cummin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.cummin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.cummin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions._cummin_helper: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 
'Tensor'}, {'is_kwarg_only': 'False', 'name': 'values', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.cumprod: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.cumprod: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.cumprod: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.cumprod: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.cumsum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.cumsum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.cumsum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.cumsum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.cumulative_trapezoid: [{'is_kwarg_only': 'False', 'name': 'y', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.cumulative_trapezoid: [{'is_kwarg_only': 'False', 'name': 'y', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.ctc_loss: 
[{'is_kwarg_only': 'False', 'name': 'log_probs', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'targets', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input_lengths', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'target_lengths', 'simple_type': 'IntArrayRef'}], + torch._C._VariableFunctions.ctc_loss: [{'is_kwarg_only': 'False', 'name': 'log_probs', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'targets', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input_lengths', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target_lengths', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._ctc_loss: [{'is_kwarg_only': 'False', 'name': 'log_probs', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'targets', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input_lengths', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'target_lengths', 'simple_type': 'IntArrayRef'}], + torch._C._VariableFunctions._ctc_loss: [{'is_kwarg_only': 'False', 'name': 'log_probs', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'targets', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input_lengths', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target_lengths', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.diag_embed: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.diagflat: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.diagonal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.diagonal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.diff: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.diff: [{'is_kwarg_only': 'False', 'name': 'self', 
'simple_type': 'Tensor'}], + torch._C._VariableFunctions.gradient: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.gradient: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'spacing', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'True', 'name': 'dim', 'simple_type': 'IntArrayRef'}], + torch._C._VariableFunctions.gradient: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'dim', 'simple_type': 'IntArrayRef'}], + torch._C._VariableFunctions.gradient: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'spacing', 'simple_type': 'ScalarList'}], + torch._C._VariableFunctions.gradient: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'spacing', 'simple_type': 'ScalarList'}, {'is_kwarg_only': 'True', 'name': 'dim', 'simple_type': 'IntArrayRef'}], + torch._C._VariableFunctions.gradient: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'spacing', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions.gradient: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'spacing', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'True', 'name': 'dim', 'simple_type': 'IntArrayRef'}], + torch._C._VariableFunctions.div: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.div: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.div: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}, 
{'is_kwarg_only': 'True', 'name': 'rounding_mode', 'simple_type': 'c10::string_view?'}], + torch._C._VariableFunctions.div: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'rounding_mode', 'simple_type': 'c10::string_view?'}], + torch._C._VariableFunctions.div: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'True', 'name': 'rounding_mode', 'simple_type': 'c10::string_view?'}], + torch._C._VariableFunctions.divide: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.divide: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.divide: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.divide: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'rounding_mode', 'simple_type': 'c10::string_view?'}], + torch._C._VariableFunctions.divide: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'rounding_mode', 'simple_type': 'c10::string_view?'}], + torch._C._VariableFunctions.divide: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'True', 'name': 'rounding_mode', 'simple_type': 'c10::string_view?'}], + torch._C._VariableFunctions.true_divide: 
[{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.true_divide: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.true_divide: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.dot: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tensor', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.dot: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tensor', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.vdot: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.vdot: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.einsum: [{'is_kwarg_only': 'False', 'name': 'equation', 'simple_type': 'c10::string_view'}, {'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions.embedding: [{'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.embedding_renorm_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'max_norm', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'norm_type', 'simple_type': 'double'}], + torch._C._VariableFunctions._embedding_bag_forward_only: 
[{'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'offsets', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._rowwise_prune: [{'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mask', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'compressed_indices_dtype', 'simple_type': 'ScalarType'}], + torch._C._VariableFunctions.row_stack: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions.row_stack: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions.embedding_bag: [{'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'offsets', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.embedding_bag: [{'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'offsets', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scale_grad_by_freq', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'mode', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'sparse', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'per_sample_weights', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'include_last_offset', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'padding_idx', 'simple_type': 'int64_t?'}], + torch._C._VariableFunctions._embedding_bag: [{'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'offsets', 'simple_type': 'Tensor'}], + 
torch._C._VariableFunctions.empty: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'True', 'name': 'names', 'simple_type': 'DimnameList?'}], + torch._C._VariableFunctions.empty: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions.empty: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions.empty_permuted: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'physical_layout', 'simple_type': 'IntArrayRef'}], + torch._C._VariableFunctions._empty_affine_quantized: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions._empty_per_channel_affine_quantized: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'True', 'name': 'scales', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'zero_points', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'axis', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions._resize_output_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'device', 'simple_type': 'Device'}], + torch._C._VariableFunctions.empty_quantized: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'qtensor', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.empty_like: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.empty_strided: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions.erf: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 
'Tensor'}], + torch._C._VariableFunctions.erf: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.erf_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.erfc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.erfc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.erfc_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.exp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.exp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.exp_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.exp2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.exp2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.exp2_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.expm1: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.expm1: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.expm1_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.eye: [{'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions.eye: [{'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'm', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions.eye: [{'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions.eye: [{'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'SymInt'}, 
{'is_kwarg_only': 'False', 'name': 'm', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions.flatten: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.flatten: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'start_dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'end_dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'out_dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.flatten: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'start_dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'end_dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'out_dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.flatten: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dims', 'simple_type': 'DimnameList'}, {'is_kwarg_only': 'False', 'name': 'out_dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.unflatten: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'sizes', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions.unflatten: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'sizes', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'names', 'simple_type': 'DimnameList'}], + torch._C._VariableFunctions.fill: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.fill: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 
'name': 'value', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.fill_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.fill_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.floor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.floor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.floor_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.floor_divide: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.floor_divide: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.floor_divide: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.frac: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.frac: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.frac_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.full: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'fill_value', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'True', 'name': 'names', 'simple_type': 'DimnameList?'}], + torch._C._VariableFunctions.full: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 
'False', 'name': 'fill_value', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.full: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'fill_value', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.full_like: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'fill_value', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.from_file: [{'is_kwarg_only': 'False', 'name': 'filename', 'simple_type': 'c10::string_view'}], + torch._C._VariableFunctions.gcd: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.gcd: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.gcd_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.lcm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.lcm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.lcm_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.grid_sampler: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'grid', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'interpolation_mode', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'padding_mode', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 
'align_corners', 'simple_type': 'bool'}], + torch._C._VariableFunctions.grid_sampler_2d: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'grid', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'interpolation_mode', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'padding_mode', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'align_corners', 'simple_type': 'bool'}], + torch._C._VariableFunctions._grid_sampler_2d_cpu_fallback: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'grid', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'interpolation_mode', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'padding_mode', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'align_corners', 'simple_type': 'bool'}], + torch._C._VariableFunctions.grid_sampler_3d: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'grid', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'interpolation_mode', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'padding_mode', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'align_corners', 'simple_type': 'bool'}], + torch._C._VariableFunctions.hann_window: [{'is_kwarg_only': 'False', 'name': 'window_length', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.hann_window: [{'is_kwarg_only': 'False', 'name': 'window_length', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'periodic', 'simple_type': 'bool'}], + torch._C._VariableFunctions.hamming_window: [{'is_kwarg_only': 'False', 'name': 'window_length', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.hamming_window: [{'is_kwarg_only': 'False', 'name': 'window_length', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'periodic', 'simple_type': 'bool'}], + 
torch._C._VariableFunctions.hamming_window: [{'is_kwarg_only': 'False', 'name': 'window_length', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'periodic', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'alpha', 'simple_type': 'double'}], + torch._C._VariableFunctions.hamming_window: [{'is_kwarg_only': 'False', 'name': 'window_length', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'periodic', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'alpha', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'beta', 'simple_type': 'double'}], + torch._C._VariableFunctions.kaiser_window: [{'is_kwarg_only': 'False', 'name': 'window_length', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.kaiser_window: [{'is_kwarg_only': 'False', 'name': 'window_length', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'periodic', 'simple_type': 'bool'}], + torch._C._VariableFunctions.kaiser_window: [{'is_kwarg_only': 'False', 'name': 'window_length', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'periodic', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'beta', 'simple_type': 'double'}], + torch._C._VariableFunctions.hinge_embedding_loss: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.group_norm: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'num_groups', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.native_group_norm: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'N', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'C', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 
'name': 'HxW', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'group', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'eps', 'simple_type': 'double'}], + torch._C._VariableFunctions._fft_r2c: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'normalization', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'onesided', 'simple_type': 'bool'}], + torch._C._VariableFunctions._fft_r2c: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'normalization', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'onesided', 'simple_type': 'bool'}], + torch._C._VariableFunctions._fft_c2r: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'normalization', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'last_dim_size', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions._fft_c2r: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'normalization', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'last_dim_size', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions._fft_c2c: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'normalization', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'forward', 'simple_type': 'bool'}], + torch._C._VariableFunctions._fft_c2c: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 
'False', 'name': 'dim', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'normalization', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'forward', 'simple_type': 'bool'}], + torch._C._VariableFunctions._validate_compressed_sparse_indices: [{'is_kwarg_only': 'False', 'name': 'is_crow', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'compressed_idx', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'plain_idx', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'cdim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'nnz', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions._cufft_get_plan_cache_size: [{'is_kwarg_only': 'False', 'name': 'device_index', 'simple_type': 'DeviceIndex'}], + torch._C._VariableFunctions._cufft_get_plan_cache_max_size: [{'is_kwarg_only': 'False', 'name': 'device_index', 'simple_type': 'DeviceIndex'}], + torch._C._VariableFunctions._cufft_set_plan_cache_max_size: [{'is_kwarg_only': 'False', 'name': 'device_index', 'simple_type': 'DeviceIndex'}, {'is_kwarg_only': 'False', 'name': 'max_size', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions._cufft_clear_plan_cache: [{'is_kwarg_only': 'False', 'name': 'device_index', 'simple_type': 'DeviceIndex'}], + torch._C._VariableFunctions._unsafe_index: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'c10::List<::std::optional>'}], + torch._C._VariableFunctions._unsafe_masked_index: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mask', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'c10::List<::std::optional>'}, {'is_kwarg_only': 'False', 'name': 'fill', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions._unsafe_masked_index_put_accumulate: 
[{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mask', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'c10::List<::std::optional>'}, {'is_kwarg_only': 'False', 'name': 'values', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.index_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.index_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.index_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.index_put_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'c10::List<::std::optional>'}, {'is_kwarg_only': 'False', 'name': 'values', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.index_put: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'c10::List<::std::optional>'}, {'is_kwarg_only': 'False', 'name': 'values', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._unsafe_index_put: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 
'simple_type': 'c10::List<::std::optional>'}, {'is_kwarg_only': 'False', 'name': 'values', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._index_put_impl_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'c10::List<::std::optional>'}, {'is_kwarg_only': 'False', 'name': 'values', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.instance_norm: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'running_mean', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'running_var', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'use_input_stats', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'momentum', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'eps', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'cudnn_enabled', 'simple_type': 'bool'}], + torch._C._VariableFunctions.isclose: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.isin: [{'is_kwarg_only': 'False', 'name': 'elements', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'test_elements', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.isin: [{'is_kwarg_only': 'False', 'name': 'elements', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'test_elements', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.isin: [{'is_kwarg_only': 'False', 'name': 'elements', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'test_element', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.isin: [{'is_kwarg_only': 'False', 'name': 'elements', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 
'test_element', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.isin: [{'is_kwarg_only': 'False', 'name': 'element', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'test_elements', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.isin: [{'is_kwarg_only': 'False', 'name': 'element', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'test_elements', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.isnan: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.is_distributed: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.is_floating_point: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.is_complex: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.is_conj: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._is_zerotensor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.is_neg: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.isreal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.is_nonzero: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.is_same_size: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.is_signed: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.is_inference: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.kl_div: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 
'simple_type': 'Tensor'}], + torch._C._VariableFunctions.kron: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.kron: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.kthvalue: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'k', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.kthvalue: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'k', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.kthvalue: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'k', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.kthvalue: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'k', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.layer_norm: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'normalized_shape', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions.native_layer_norm: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'normalized_shape', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'eps', 'simple_type': 'double'}], + torch._C._VariableFunctions.rms_norm: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 
'normalized_shape', 'simple_type': 'IntArrayRef'}], + torch._C._VariableFunctions.nan_to_num: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.nan_to_num: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.nan_to_num_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.mkldnn_linear_backward_weights: [{'is_kwarg_only': 'False', 'name': 'grad_output', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bias_defined', 'simple_type': 'bool'}], + torch._C._VariableFunctions._cslt_compress: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._cslt_sparse_mm: [{'is_kwarg_only': 'False', 'name': 'compressed_A', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dense_B', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._cslt_sparse_mm_search: [{'is_kwarg_only': 'False', 'name': 'compressed_A', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dense_B', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._sparse_semi_structured_tile: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._sparse_semi_structured_apply: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'thread_masks', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._sparse_semi_structured_apply_dense: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'thread_masks', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._sparse_semi_structured_linear: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 
'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'meta', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._sparse_semi_structured_mm: [{'is_kwarg_only': 'False', 'name': 'mat1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat1_meta', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._sparse_semi_structured_addmm: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat1_meta', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._mixed_dtypes_linear: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scale', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.fbgemm_linear_int8_weight_fp32_activation: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'packed', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'col_offsets', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight_scale', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'weight_zero_point', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.fbgemm_linear_int8_weight: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'packed', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'col_offsets', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight_scale', 'simple_type': 
'Scalar'}, {'is_kwarg_only': 'False', 'name': 'weight_zero_point', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.fbgemm_linear_quantize_weight: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.fbgemm_pack_gemm_matrix_fp16: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._wrapped_linear_prepack: [{'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight_scale', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight_zero_point', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._wrapped_quantized_linear_prepacked: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input_scale', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input_zero_point', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'packed_weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_scale', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_zero_point', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'out_channel', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.fbgemm_linear_fp16_weight_fp32_activation: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'packed_weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.fbgemm_linear_fp16_weight: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'packed_weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor'}], + 
torch._C._VariableFunctions.fbgemm_pack_quantized_matrix: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.fbgemm_pack_quantized_matrix: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'K', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'N', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.ldexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.ldexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.ldexp_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.linspace: [{'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'steps', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.linspace: [{'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'steps', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.linspace: [{'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'steps', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.linspace: [{'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'steps', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.linspace: [{'is_kwarg_only': 
'False', 'name': 'start', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'steps', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.linspace: [{'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'steps', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.linspace: [{'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'steps', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.linspace: [{'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'steps', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.log: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.log: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.log_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.log10: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.log10: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.log10_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.log1p: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.log1p: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.log1p_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.log2: [{'is_kwarg_only': 'False', 'name': 'self', 
'simple_type': 'Tensor'}], + torch._C._VariableFunctions.log2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.log2_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.logaddexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.logaddexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.logaddexp2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.logaddexp2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.xlogy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.xlogy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.xlogy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.xlogy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.xlogy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.xlogy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 
'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.xlogy_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.xlogy_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.logspace: [{'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'steps', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.logspace: [{'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'steps', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.logspace: [{'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'steps', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.logspace: [{'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'steps', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.logspace: [{'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'steps', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.logspace: [{'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'steps', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.logspace: [{'is_kwarg_only': 'False', 'name': 'start', 
'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'steps', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.logspace: [{'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'steps', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.log_softmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.log_softmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.log_softmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions._log_softmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'half_to_float', 'simple_type': 'bool'}], + torch._C._VariableFunctions._log_softmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'half_to_float', 'simple_type': 'bool'}], + torch._C._VariableFunctions._log_softmax_backward_data: [{'is_kwarg_only': 'False', 'name': 'grad_output', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'input_dtype', 'simple_type': 'ScalarType'}], + torch._C._VariableFunctions._log_softmax_backward_data: [{'is_kwarg_only': 'False', 'name': 'grad_output', 'simple_type': 'Tensor'}, {'is_kwarg_only': 
'False', 'name': 'output', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'input_dtype', 'simple_type': 'ScalarType'}], + torch._C._VariableFunctions._logcumsumexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions._logcumsumexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.logcumsumexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.logcumsumexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.logcumsumexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.logcumsumexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.logsumexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef', 'size': 1}], + torch._C._VariableFunctions.logsumexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef', 'size': 1}], + torch._C._VariableFunctions.logsumexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}], + torch._C._VariableFunctions.logsumexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 
'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}], + torch._C._VariableFunctions.margin_ranking_loss: [{'is_kwarg_only': 'False', 'name': 'input1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input2', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.matmul: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.matmul: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.matrix_power: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.matrix_power: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.matrix_exp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._aminmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._aminmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.aminmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.aminmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._compute_linear_combination: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'coefficients', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._compute_linear_combination: [{'is_kwarg_only': 
'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'coefficients', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.max: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.max: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.max: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.max: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.max: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.max: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.max: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.max: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.amax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.amax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.max_pool1d_with_indices: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 1}], + torch._C._VariableFunctions.max_pool1d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 
'simple_type': 'IntArrayRef', 'size': 1}], + torch._C._VariableFunctions.max_pool2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 2}], + torch._C._VariableFunctions.mkldnn_max_pool2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 2}], + torch._C._VariableFunctions.mkldnn_max_pool3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 3}], + torch._C._VariableFunctions.quantized_max_pool1d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 1}], + torch._C._VariableFunctions.quantized_max_pool2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 2}], + torch._C._VariableFunctions.quantized_max_pool3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 3}], + torch._C._VariableFunctions.max_pool3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 3}], + torch._C._VariableFunctions.mean: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.mean: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.mean: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef?', 'size': 1}], + torch._C._VariableFunctions.mean: [{'is_kwarg_only': 'False', 'name': 
'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef?', 'size': 1}], + torch._C._VariableFunctions.mean: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}], + torch._C._VariableFunctions.mean: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}], + torch._C._VariableFunctions.nanmean: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.nanmean: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.median: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.median: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.median: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.median: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.median: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.nanmedian: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.nanmedian: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.nanmedian: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 
'simple_type': 'int64_t'}], + torch._C._VariableFunctions.nanmedian: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.nanmedian: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.min: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.min: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.min: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.min: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.min: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.min: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.min: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.min: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.amin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.amin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._mps_convolution: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, 
{'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'groups', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions.mkldnn_convolution: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'groups', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions.mkldnn_rnn_layer: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight0', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight2', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight3', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'hx_', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'cx_', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'reverse', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'batch_sizes', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'mode', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'hidden_size', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'num_layers', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'has_biases', 
'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'bidirectional', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'batch_first', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'train', 'simple_type': 'bool'}], + torch._C._VariableFunctions.miopen_batch_norm: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'running_mean', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'running_var', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'training', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'exponential_average_factor', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'epsilon', 'simple_type': 'double'}], + torch._C._VariableFunctions.miopen_convolution: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'groups', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'benchmark', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'deterministic', 'simple_type': 'bool'}], + torch._C._VariableFunctions.miopen_convolution_transpose: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 
'name': 'output_padding', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'groups', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'benchmark', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'deterministic', 'simple_type': 'bool'}], + torch._C._VariableFunctions.miopen_depthwise_convolution: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'groups', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'benchmark', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'deterministic', 'simple_type': 'bool'}], + torch._C._VariableFunctions.miopen_convolution_relu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'groups', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions.miopen_convolution_add_relu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'z', 
'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'alpha', 'simple_type': 'Scalar?'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'groups', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions.miopen_rnn: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'weight_stride0', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'hx', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'cx', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'mode', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'hidden_size', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'num_layers', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'batch_first', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'dropout', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'train', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'bidirectional', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'batch_sizes', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'dropout_state', 'simple_type': 'Tensor?'}], + torch._C._VariableFunctions.mm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.mm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._int_mm: [{'is_kwarg_only': 'False', 'name': 'self', 
'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._int_mm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._convert_weight_to_int4pack: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'innerKTiles', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions._weight_int4pack_mm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'qGroupSize', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'qScaleAndZeros', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._weight_int8pack_mm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scales', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._sparse_sparse_matmul: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.mode: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.mode: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.mode: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.mode: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.mul: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 
'simple_type': 'Tensor'}], + torch._C._VariableFunctions.mul: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.multiply: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.multiply: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.multiply: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.mv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'vec', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.mv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'vec', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.mvlgamma: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.mvlgamma: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.narrow_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'length', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions.narrow_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'start', 
'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'length', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions.narrow: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'length', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions.narrow: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'length', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions.native_batch_norm: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'running_mean', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'running_var', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'training', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'momentum', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'eps', 'simple_type': 'double'}], + torch._C._VariableFunctions.native_batch_norm: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'running_mean', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'running_var', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'training', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'momentum', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'eps', 'simple_type': 'double'}], + 
torch._C._VariableFunctions._native_batch_norm_legit: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'running_mean', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'running_var', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'training', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'momentum', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'eps', 'simple_type': 'double'}], + torch._C._VariableFunctions._native_batch_norm_legit: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'running_mean', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'running_var', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'training', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'momentum', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'eps', 'simple_type': 'double'}], + torch._C._VariableFunctions._native_batch_norm_legit: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'training', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'momentum', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'eps', 'simple_type': 'double'}], + torch._C._VariableFunctions._native_batch_norm_legit: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'bias', 
'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'training', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'momentum', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'eps', 'simple_type': 'double'}], + torch._C._VariableFunctions._native_batch_norm_legit_no_training: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'running_mean', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'running_var', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'momentum', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'eps', 'simple_type': 'double'}], + torch._C._VariableFunctions.batch_norm_stats: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'eps', 'simple_type': 'double'}], + torch._C._VariableFunctions.batch_norm_elemt: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'mean', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'invstd', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'eps', 'simple_type': 'double'}], + torch._C._VariableFunctions.batch_norm_elemt: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'mean', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'invstd', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'eps', 'simple_type': 'double'}], + torch._C._VariableFunctions.batch_norm_gather_stats: 
[{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mean', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'invstd', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'running_mean', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'running_var', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'momentum', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'eps', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'count', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.batch_norm_gather_stats_with_counts: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mean', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'invstd', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'running_mean', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'running_var', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'momentum', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'eps', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'counts', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.batch_norm_backward_reduce: [{'is_kwarg_only': 'False', 'name': 'grad_out', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mean', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'invstd', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'input_g', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'weight_g', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'bias_g', 'simple_type': 'bool'}], + torch._C._VariableFunctions.batch_norm_backward_elemt: [{'is_kwarg_only': 'False', 'name': 'grad_out', 'simple_type': 'Tensor'}, {'is_kwarg_only': 
'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mean', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'invstd', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'sum_dy', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'sum_dy_xmu', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'count', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.batch_norm_update_stats: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'running_mean', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'running_var', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'momentum', 'simple_type': 'double'}], + torch._C._VariableFunctions.is_vulkan_available: [], + torch._C._VariableFunctions._nnpack_available: [], + torch._C._VariableFunctions._nnpack_spatial_convolution: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef', 'size': 2}], + torch._C._VariableFunctions.ones: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'True', 'name': 'names', 'simple_type': 'DimnameList?'}], + torch._C._VariableFunctions.ones: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions.ones: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions.ones_like: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.pairwise_distance: [{'is_kwarg_only': 'False', 'name': 'x1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'x2', 
'simple_type': 'Tensor'}], + torch._C._VariableFunctions.cdist: [{'is_kwarg_only': 'False', 'name': 'x1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'x2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._euclidean_dist: [{'is_kwarg_only': 'False', 'name': 'x1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'x2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.pdist: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.cosine_similarity: [{'is_kwarg_only': 'False', 'name': 'x1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'x2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.permute: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dims', 'simple_type': 'IntArrayRef'}], + torch._C._VariableFunctions.movedim: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'destination', 'simple_type': 'IntArrayRef'}], + torch._C._VariableFunctions.movedim: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'destination', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.moveaxis: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'destination', 'simple_type': 'IntArrayRef'}], + torch._C._VariableFunctions.moveaxis: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'destination', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.adjoint: [{'is_kwarg_only': 'False', 'name': 'self', 
'simple_type': 'Tensor'}], + torch._C._VariableFunctions.pixel_shuffle: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'upscale_factor', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.pixel_unshuffle: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'downscale_factor', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.channel_shuffle: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'groups', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions.native_channel_shuffle: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'groups', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions._pin_memory: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.pinverse: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.poisson_nll_loss: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'log_input', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'full', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'eps', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'reduction', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.rad2deg: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.rad2deg: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.rad2deg_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.deg2rad: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.deg2rad: 
[{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.deg2rad_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.scalar_tensor: [{'is_kwarg_only': 'False', 'name': 's', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.rand: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'True', 'name': 'names', 'simple_type': 'DimnameList?'}], + torch._C._VariableFunctions.rand: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'True', 'name': 'generator', 'simple_type': 'Generator?'}, {'is_kwarg_only': 'True', 'name': 'names', 'simple_type': 'DimnameList?'}], + torch._C._VariableFunctions.rand: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions.rand: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'True', 'name': 'generator', 'simple_type': 'Generator?'}], + torch._C._VariableFunctions.rand: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions.rand: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'True', 'name': 'generator', 'simple_type': 'Generator?'}], + torch._C._VariableFunctions.rand_like: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.randint: [{'is_kwarg_only': 'False', 'name': 'high', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions.randint: [{'is_kwarg_only': 'False', 'name': 'high', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'True', 'name': 'generator', 'simple_type': 'Generator?'}], + torch._C._VariableFunctions.randint: [{'is_kwarg_only': 'False', 
'name': 'low', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'high', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions.randint: [{'is_kwarg_only': 'False', 'name': 'low', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'high', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'True', 'name': 'generator', 'simple_type': 'Generator?'}], + torch._C._VariableFunctions.randint: [{'is_kwarg_only': 'False', 'name': 'high', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions.randint: [{'is_kwarg_only': 'False', 'name': 'high', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'True', 'name': 'generator', 'simple_type': 'Generator?'}], + torch._C._VariableFunctions.randint: [{'is_kwarg_only': 'False', 'name': 'low', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'high', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions.randint: [{'is_kwarg_only': 'False', 'name': 'low', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'high', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'True', 'name': 'generator', 'simple_type': 'Generator?'}], + torch._C._VariableFunctions.randint_like: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'high', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions.randint_like: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'low', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'high', 'simple_type': 'SymInt'}], + 
torch._C._VariableFunctions.randn: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions.randn: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'True', 'name': 'generator', 'simple_type': 'Generator?'}], + torch._C._VariableFunctions.randn: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'True', 'name': 'names', 'simple_type': 'DimnameList?'}], + torch._C._VariableFunctions.randn: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'True', 'name': 'generator', 'simple_type': 'Generator?'}, {'is_kwarg_only': 'True', 'name': 'names', 'simple_type': 'DimnameList?'}], + torch._C._VariableFunctions.randn: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions.randn: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'True', 'name': 'generator', 'simple_type': 'Generator?'}], + torch._C._VariableFunctions.randn_like: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.randperm: [{'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions.randperm: [{'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'True', 'name': 'generator', 'simple_type': 'Generator?'}], + torch._C._VariableFunctions.randperm: [{'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions.randperm: [{'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'True', 'name': 'generator', 'simple_type': 'Generator?'}], + torch._C._VariableFunctions.ravel: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.reciprocal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + 
torch._C._VariableFunctions.reciprocal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.reciprocal_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.neg: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.neg: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.neg_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.negative: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.negative: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.negative_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.repeat_interleave: [{'is_kwarg_only': 'False', 'name': 'repeats', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.repeat_interleave: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'repeats', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.repeat_interleave: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'repeats', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions.reshape: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'shape', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions._mkldnn_reshape: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'shape', 'simple_type': 'IntArrayRef'}], + torch._C._VariableFunctions.round: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.round: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + 
torch._C._VariableFunctions.round: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.round: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.round_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.round_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.rrelu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.rrelu_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.relu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.relu_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.prelu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._prelu_kernel: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.hardshrink: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.hardshrink: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.rsqrt: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.rsqrt: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.rsqrt_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.select: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 
'index', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.select: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions.selu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.selu_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.celu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.celu_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.sigmoid: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.sigmoid: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.sigmoid_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.logit: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.logit: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.logit_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.sin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.sin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.sin_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.sinc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.sinc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.sinc_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 
'Tensor'}], + torch._C._VariableFunctions.sinh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.sinh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.sinh_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.detach: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.detach_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.slice_inverse: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.slice_scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.slice_scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.select_scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions.diagonal_scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.as_strided_scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions.smm: 
[{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.softmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.softmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.softmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions._softmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'half_to_float', 'simple_type': 'bool'}], + torch._C._VariableFunctions._softmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'half_to_float', 'simple_type': 'bool'}], + torch._C._VariableFunctions._softmax_backward_data: [{'is_kwarg_only': 'False', 'name': 'grad_output', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'input_dtype', 'simple_type': 'ScalarType'}], + torch._C._VariableFunctions._softmax_backward_data: [{'is_kwarg_only': 'False', 'name': 'grad_output', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'input_dtype', 'simple_type': 'ScalarType'}], + torch._C._VariableFunctions.unsafe_split: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 
'Tensor'}, {'is_kwarg_only': 'False', 'name': 'split_size', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions.split: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'split_size', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions.split: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'split_size', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions.unsafe_split_with_sizes: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'split_sizes', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions.split_with_sizes: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'split_sizes', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions.hsplit: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'sections', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.hsplit: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'IntArrayRef'}], + torch._C._VariableFunctions.vsplit: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'sections', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.vsplit: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'IntArrayRef'}], + torch._C._VariableFunctions.dsplit: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'sections', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.dsplit: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'IntArrayRef'}], + 
torch._C._VariableFunctions.squeeze: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.squeeze: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.squeeze: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.squeeze: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef'}], + torch._C._VariableFunctions.sspaddmm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.sspaddmm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._chunk_cat: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'num_chunks', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions._chunk_cat: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'num_chunks', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.stack: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions.stack: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._stack: [{'is_kwarg_only': 'False', 'name': 'tensors', 
'simple_type': 'TensorList'}], + torch._C._VariableFunctions._stack: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions.hstack: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions.hstack: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions.vstack: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions.vstack: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions.dstack: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions.dstack: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions.stft: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n_fft', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.stft: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n_fft', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.istft: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n_fft', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.sum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.sum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef?', 'size': 1}], + torch._C._VariableFunctions.sum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}], + torch._C._VariableFunctions.sum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 
'name': 'dim', 'simple_type': 'IntArrayRef?', 'size': 1}], + torch._C._VariableFunctions.sum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}], + torch._C._VariableFunctions.nansum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.nansum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.sqrt: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.sqrt: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.sqrt_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.square: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.square: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.square_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.std: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.std: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef?', 'size': 1}], + torch._C._VariableFunctions.std: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.std: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef?', 'size': 1}], + torch._C._VariableFunctions.std: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.std: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 
'size': 1}], + torch._C._VariableFunctions.std: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}], + torch._C._VariableFunctions.std: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}], + torch._C._VariableFunctions.std: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}], + torch._C._VariableFunctions.std_mean: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.std_mean: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef?', 'size': 1}], + torch._C._VariableFunctions.std_mean: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.std_mean: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}], + torch._C._VariableFunctions.std_mean: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}], + torch._C._VariableFunctions.prod: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.prod: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.prod: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.prod: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 
'simple_type': 'Dimname'}], + torch._C._VariableFunctions.prod: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.t: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.tan: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.tan: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.tan_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.tanh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.tanh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.tanh_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.tensordot: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dims_self', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'dims_other', 'simple_type': 'IntArrayRef'}], + torch._C._VariableFunctions.tensordot: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dims_self', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'dims_other', 'simple_type': 'IntArrayRef'}], + torch._C._VariableFunctions.threshold: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'threshold', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.threshold: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, 
{'is_kwarg_only': 'False', 'name': 'threshold', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.threshold_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'threshold', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.tile: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dims', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions.transpose: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim0', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dim1', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.transpose: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim0', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'dim1', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions._mkldnn_transpose: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim0', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dim1', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions._mkldnn_transpose_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim0', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dim1', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.flip: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dims', 'simple_type': 'IntArrayRef'}], + torch._C._VariableFunctions.fliplr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.flipud: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 
'Tensor'}], + torch._C._VariableFunctions.roll: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'shifts', 'simple_type': 'SymIntArrayRef', 'size': 1}], + torch._C._VariableFunctions.rot90: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.trapezoid: [{'is_kwarg_only': 'False', 'name': 'y', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.trapezoid: [{'is_kwarg_only': 'False', 'name': 'y', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.trapz: [{'is_kwarg_only': 'False', 'name': 'y', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.trapz: [{'is_kwarg_only': 'False', 'name': 'y', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._transform_bias_rescale_qkv: [{'is_kwarg_only': 'False', 'name': 'qkv', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'qkv_bias', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'num_heads', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions._nested_tensor_from_mask: [{'is_kwarg_only': 'False', 'name': 't', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mask', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._nested_tensor_from_mask_left_aligned: [{'is_kwarg_only': 'False', 'name': 't', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mask', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._nested_from_padded: [{'is_kwarg_only': 'False', 'name': 'padded', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'cpu_nested_shape_example', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._nested_from_padded_and_nested_example: [{'is_kwarg_only': 'False', 'name': 'padded', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'nt_example', 'simple_type': 'Tensor'}], + 
torch._C._VariableFunctions._nested_view_from_buffer: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'nested_size', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'nested_strides', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'offsets', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._nested_view_from_buffer_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'nested_size', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'nested_strides', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'offsets', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._nested_view_from_buffer_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'nested_size', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'nested_strides', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'offsets', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._nested_view_from_jagged: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'offsets', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dummy', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._nested_view_from_jagged_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'offsets', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dummy', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._nested_view_from_jagged_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'offsets', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dummy', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._nested_get_values: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 
'Tensor'}], + torch._C._VariableFunctions._nested_get_values_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._nested_get_values_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._nested_get_offsets: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._nested_get_lengths: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._nested_get_ragged_idx: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._nested_get_min_seqlen: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._nested_get_max_seqlen: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._nested_get_jagged_dummy: [{'is_kwarg_only': 'False', 'name': 'any', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._nested_compute_contiguous_strides_offsets: [{'is_kwarg_only': 'False', 'name': 'nested_size', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._trilinear: [{'is_kwarg_only': 'False', 'name': 'i1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'i2', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'i3', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'expand1', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'expand2', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'expand3', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'sumdim', 'simple_type': 'IntArrayRef'}], + torch._C._VariableFunctions.triplet_margin_loss: [{'is_kwarg_only': 'False', 'name': 'anchor', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'positive', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'negative', 'simple_type': 'Tensor'}], + 
torch._C._VariableFunctions.trunc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.trunc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.trunc_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.fix: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.fix: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.fix_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._has_compatible_shallow_copy_type: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'from', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._unique: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.unique_dim: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.unique_consecutive: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._unique2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.unsqueeze: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.vander: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.var: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.var: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef?', 'size': 1}], + torch._C._VariableFunctions.var: 
[{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.var: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef?', 'size': 1}], + torch._C._VariableFunctions.var: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.var: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}], + torch._C._VariableFunctions.var: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}], + torch._C._VariableFunctions.var: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}], + torch._C._VariableFunctions.var: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}], + torch._C._VariableFunctions.var_mean: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.var_mean: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef?', 'size': 1}], + torch._C._VariableFunctions.var_mean: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.var_mean: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}], + torch._C._VariableFunctions.var_mean: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}], + torch._C._VariableFunctions.where: [{'is_kwarg_only': 
'False', 'name': 'condition', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.where: [{'is_kwarg_only': 'False', 'name': 'condition', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.where: [{'is_kwarg_only': 'False', 'name': 'condition', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.where: [{'is_kwarg_only': 'False', 'name': 'condition', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.where: [{'is_kwarg_only': 'False', 'name': 'condition', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.where: [{'is_kwarg_only': 'False', 'name': 'condition', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.norm_except_dim: [{'is_kwarg_only': 'False', 'name': 'v', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._weight_norm: [{'is_kwarg_only': 'False', 'name': 'v', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'g', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._weight_norm_interface: [{'is_kwarg_only': 'False', 'name': 'v', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'g', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.zeros: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'True', 'name': 'names', 'simple_type': 'DimnameList?'}], + 
torch._C._VariableFunctions.zeros: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions.zeros: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions._efficientzerotensor: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions.zeros_like: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._standard_gamma_grad: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._standard_gamma: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._dirichlet_grad: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'alpha', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'total', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._sample_dirichlet: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.poisson: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.binomial: [{'is_kwarg_only': 'False', 'name': 'count', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'prob', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.native_norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.native_norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'Scalar?'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef', 'size': 1}, {'is_kwarg_only': 'False', 'name': 'keepdim', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'dtype', 'simple_type': 'ScalarType?'}], + 
torch._C._VariableFunctions._sparse_sum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._sparse_sum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'dtype', 'simple_type': 'ScalarType'}], + torch._C._VariableFunctions._sparse_sum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef', 'size': 1}], + torch._C._VariableFunctions._sparse_sum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef', 'size': 1}, {'is_kwarg_only': 'True', 'name': 'dtype', 'simple_type': 'ScalarType'}], + torch._C._VariableFunctions._sparse_csr_sum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef', 'size': 1}], + torch._C._VariableFunctions._sparse_csr_prod: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef', 'size': 1}], + torch._C._VariableFunctions._sparse_softmax_backward_data: [{'is_kwarg_only': 'False', 'name': 'grad_output', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._sparse_log_softmax_backward_data: [{'is_kwarg_only': 'False', 'name': 'grad_output', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 
'False', 'name': 'p', 'simple_type': 'Scalar?'}, {'is_kwarg_only': 'True', 'name': 'dtype', 'simple_type': 'ScalarType'}], + torch._C._VariableFunctions.norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'Scalar?'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef', 'size': 1}, {'is_kwarg_only': 'False', 'name': 'keepdim', 'simple_type': 'bool'}, {'is_kwarg_only': 'True', 'name': 'dtype', 'simple_type': 'ScalarType'}], + torch._C._VariableFunctions.norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'Scalar?'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef', 'size': 1}], + torch._C._VariableFunctions.norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'Scalar?'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef', 'size': 1}, {'is_kwarg_only': 'False', 'name': 'keepdim', 'simple_type': 'bool'}, {'is_kwarg_only': 'True', 'name': 'dtype', 'simple_type': 'ScalarType'}], + torch._C._VariableFunctions.norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'Scalar?'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef', 'size': 1}], + torch._C._VariableFunctions.norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'Scalar?'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}, {'is_kwarg_only': 'False', 'name': 'keepdim', 'simple_type': 'bool'}, {'is_kwarg_only': 'True', 'name': 'dtype', 'simple_type': 'ScalarType'}], + torch._C._VariableFunctions.norm: 
[{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'Scalar?'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}], + torch._C._VariableFunctions.norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'Scalar?'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}, {'is_kwarg_only': 'False', 'name': 'keepdim', 'simple_type': 'bool'}, {'is_kwarg_only': 'True', 'name': 'dtype', 'simple_type': 'ScalarType'}], + torch._C._VariableFunctions.norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'Scalar?'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}], + torch._C._VariableFunctions.frexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.frexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.frobenius_norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef', 'size': 1}], + torch._C._VariableFunctions.frobenius_norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef', 'size': 1}], + torch._C._VariableFunctions.nuclear_norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.nuclear_norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.nuclear_norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef', 'size': 2}], + torch._C._VariableFunctions.nuclear_norm: [{'is_kwarg_only': 'False', 
'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef', 'size': 2}], + torch._C._VariableFunctions.clone: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.positive: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.resize_as_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'the_template', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.resize_as_sparse_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'the_template', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.zero_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.sub: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.sub: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.subtract: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.subtract: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.subtract: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.rsub: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.rsub: [{'is_kwarg_only': 'False', 'name': 'self', 
'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.heaviside: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'values', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.heaviside: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'values', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.addmm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.addmm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._addmm_activation: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._addmm_activation: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._scaled_mm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scale_a', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scale_b', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._scaled_mm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}, {'is_kwarg_only': 
'False', 'name': 'scale_a', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scale_b', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._validate_sparse_coo_tensor_args: [{'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'values', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'IntArrayRef'}], + torch._C._VariableFunctions._validate_sparse_compressed_tensor_args: [{'is_kwarg_only': 'False', 'name': 'compressed_indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'plain_indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'values', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'layout', 'simple_type': 'Layout'}], + torch._C._VariableFunctions._validate_sparse_csr_tensor_args: [{'is_kwarg_only': 'False', 'name': 'crow_indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'col_indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'values', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'IntArrayRef'}], + torch._C._VariableFunctions._validate_sparse_csc_tensor_args: [{'is_kwarg_only': 'False', 'name': 'ccol_indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'row_indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'values', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'IntArrayRef'}], + torch._C._VariableFunctions._validate_sparse_bsr_tensor_args: [{'is_kwarg_only': 'False', 'name': 'crow_indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'col_indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'values', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'IntArrayRef'}], + 
torch._C._VariableFunctions._validate_sparse_bsc_tensor_args: [{'is_kwarg_only': 'False', 'name': 'ccol_indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'row_indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'values', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'IntArrayRef'}], + torch._C._VariableFunctions._to_cpu: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._coalesce: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.hspmm: [{'is_kwarg_only': 'False', 'name': 'mat1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.hspmm: [{'is_kwarg_only': 'False', 'name': 'mat1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.unbind: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.unbind: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions._to_sparse_semi_structured: [{'is_kwarg_only': 'False', 'name': 'dense', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.quantize_per_tensor_dynamic: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dtype', 'simple_type': 'ScalarType'}, {'is_kwarg_only': 'False', 'name': 'reduce_range', 'simple_type': 'bool'}], + torch._C._VariableFunctions.quantize_per_tensor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scale', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'zero_point', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dtype', 'simple_type': 'ScalarType'}], + 
torch._C._VariableFunctions.quantize_per_tensor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scale', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'zero_point', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dtype', 'simple_type': 'ScalarType'}], + torch._C._VariableFunctions.quantize_per_tensor: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scales', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'zero_points', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dtype', 'simple_type': 'ScalarType'}], + torch._C._VariableFunctions.quantize_per_channel: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scales', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'zero_points', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'axis', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dtype', 'simple_type': 'ScalarType'}], + torch._C._VariableFunctions.dequantize: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.dequantize: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions.q_scale: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.q_zero_point: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.q_per_channel_scales: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.q_per_channel_zero_points: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.q_per_channel_axis: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.int_repr: [{'is_kwarg_only': 
'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._make_per_tensor_quantized_tensor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scale', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'zero_point', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions._make_per_channel_quantized_tensor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scale', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'zero_point', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'axis', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.fake_quantize_per_tensor_affine: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scale', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'zero_point', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'quant_min', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'quant_max', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.fake_quantize_per_tensor_affine: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scale', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'zero_point', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'quant_min', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'quant_max', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions._fake_quantize_per_tensor_affine_cachemask_tensor_qparams: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scale', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'zero_point', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'fake_quant_enabled', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'quant_min', 'simple_type': 
'int64_t'}, {'is_kwarg_only': 'False', 'name': 'quant_max', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions._fake_quantize_learnable_per_tensor_affine: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scale', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'zero_point', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'quant_min', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'quant_max', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.fake_quantize_per_channel_affine: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scale', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'zero_point', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'axis', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'quant_min', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'quant_max', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions._fake_quantize_learnable_per_channel_affine: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scale', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'zero_point', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'axis', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'quant_min', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'quant_max', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.fused_moving_avg_obs_fake_quant: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'observer_on', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'fake_quant_on', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'running_min', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'running_max', 'simple_type': 'Tensor'}, {'is_kwarg_only': 
'False', 'name': 'scale', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'zero_point', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'averaging_const', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'quant_min', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'quant_max', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'ch_axis', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions._fused_moving_avg_obs_fq_helper: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'observer_on', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'fake_quant_on', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'running_min', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'running_max', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scale', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'zero_point', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'averaging_const', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'quant_min', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'quant_max', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'ch_axis', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions._choose_qparams_per_tensor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._saturate_weight_to_fp16: [{'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.choose_qparams_optimized: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'numel', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'n_bins', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'ratio', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'bit_width', 'simple_type': 'int64_t'}], + 
torch._C._VariableFunctions.meshgrid: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions.meshgrid: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'True', 'name': 'indexing', 'simple_type': 'c10::string_view'}], + torch._C._VariableFunctions.cartesian_prod: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions.combinations: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.result_type: [{'is_kwarg_only': 'False', 'name': 'tensor', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.result_type: [{'is_kwarg_only': 'False', 'name': 'tensor', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.result_type: [{'is_kwarg_only': 'False', 'name': 'scalar', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'tensor', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.result_type: [{'is_kwarg_only': 'False', 'name': 'scalar1', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'scalar2', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.can_cast: [{'is_kwarg_only': 'False', 'name': 'from_', 'simple_type': 'ScalarType'}, {'is_kwarg_only': 'False', 'name': 'to', 'simple_type': 'ScalarType'}], + torch._C._VariableFunctions.promote_types: [{'is_kwarg_only': 'False', 'name': 'type1', 'simple_type': 'ScalarType'}, {'is_kwarg_only': 'False', 'name': 'type2', 'simple_type': 'ScalarType'}], + torch._C._VariableFunctions._lstm_mps: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'hx', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'params', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'has_biases', 
'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'num_layers', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dropout', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'train', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'bidirectional', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'batch_first', 'simple_type': 'bool'}], + torch._C._VariableFunctions.lstm: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'hx', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'params', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'has_biases', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'num_layers', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dropout', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'train', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'bidirectional', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'batch_first', 'simple_type': 'bool'}], + torch._C._VariableFunctions.lstm: [{'is_kwarg_only': 'False', 'name': 'data', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch_sizes', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'hx', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'params', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'has_biases', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'num_layers', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dropout', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'train', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'bidirectional', 'simple_type': 'bool'}], + torch._C._VariableFunctions.gru: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'hx', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'params', 
'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'has_biases', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'num_layers', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dropout', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'train', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'bidirectional', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'batch_first', 'simple_type': 'bool'}], + torch._C._VariableFunctions.gru: [{'is_kwarg_only': 'False', 'name': 'data', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch_sizes', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'hx', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'params', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'has_biases', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'num_layers', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dropout', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'train', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'bidirectional', 'simple_type': 'bool'}], + torch._C._VariableFunctions.rnn_tanh: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'hx', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'params', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'has_biases', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'num_layers', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dropout', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'train', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'bidirectional', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'batch_first', 'simple_type': 'bool'}], + torch._C._VariableFunctions.rnn_tanh: [{'is_kwarg_only': 'False', 'name': 'data', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 
'batch_sizes', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'hx', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'params', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'has_biases', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'num_layers', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dropout', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'train', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'bidirectional', 'simple_type': 'bool'}], + torch._C._VariableFunctions.rnn_relu: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'hx', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'params', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'has_biases', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'num_layers', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dropout', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'train', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'bidirectional', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'batch_first', 'simple_type': 'bool'}], + torch._C._VariableFunctions.rnn_relu: [{'is_kwarg_only': 'False', 'name': 'data', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch_sizes', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'hx', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'params', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'has_biases', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'num_layers', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dropout', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'train', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'bidirectional', 'simple_type': 'bool'}], + torch._C._VariableFunctions.lstm_cell: [{'is_kwarg_only': 
'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'hx', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'w_ih', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'w_hh', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.gru_cell: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'hx', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'w_ih', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'w_hh', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.rnn_tanh_cell: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'hx', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'w_ih', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'w_hh', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.rnn_relu_cell: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'hx', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'w_ih', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'w_hh', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.quantized_lstm_cell: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'hx', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'w_ih', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'w_hh', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'b_ih', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'b_hh', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'packed_ih', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'packed_hh', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'col_offsets_ih', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'col_offsets_hh', 'simple_type': 'Tensor'}, 
{'is_kwarg_only': 'False', 'name': 'scale_ih', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'scale_hh', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'zero_point_ih', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'zero_point_hh', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.quantized_gru_cell: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'hx', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'w_ih', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'w_hh', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'b_ih', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'b_hh', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'packed_ih', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'packed_hh', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'col_offsets_ih', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'col_offsets_hh', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scale_ih', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'scale_hh', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'zero_point_ih', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'zero_point_hh', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.quantized_rnn_relu_cell: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'hx', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'w_ih', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'w_hh', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'b_ih', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'b_hh', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'packed_ih', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'packed_hh', 'simple_type': 'Tensor'}, 
{'is_kwarg_only': 'False', 'name': 'col_offsets_ih', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'col_offsets_hh', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scale_ih', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'scale_hh', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'zero_point_ih', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'zero_point_hh', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.quantized_rnn_tanh_cell: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'hx', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'w_ih', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'w_hh', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'b_ih', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'b_hh', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'packed_ih', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'packed_hh', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'col_offsets_ih', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'col_offsets_hh', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scale_ih', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'scale_hh', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'zero_point_ih', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'zero_point_hh', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions._pack_padded_sequence: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'lengths', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch_first', 'simple_type': 'bool'}], + torch._C._VariableFunctions._pad_packed_sequence: [{'is_kwarg_only': 'False', 'name': 'data', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch_sizes', 'simple_type': 
'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch_first', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'padding_value', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'total_length', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.masked_fill: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mask', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.masked_fill: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mask', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.masked_scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mask', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._masked_softmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mask', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.put: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.index_add: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.index_add: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 
'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.index_add: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.index_reduce: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'reduce', 'simple_type': 'c10::string_view'}], + torch._C._VariableFunctions.index_reduce: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'reduce', 'simple_type': 'c10::string_view'}], + torch._C._VariableFunctions.index_fill: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.index_fill: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.index_fill: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 
'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.index_fill: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 
'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'reduce', 'simple_type': 'c10::string_view'}], + torch._C._VariableFunctions.scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'reduce', 'simple_type': 'c10::string_view'}], + torch._C._VariableFunctions.scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'True', 'name': 'reduce', 'simple_type': 'c10::string_view'}], + torch._C._VariableFunctions.scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'True', 'name': 'reduce', 'simple_type': 'c10::string_view'}], + torch._C._VariableFunctions.scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 
'Scalar'}], + torch._C._VariableFunctions.scatter_add: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.scatter_add: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.scatter_add: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.scatter_reduce: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'reduce', 'simple_type': 'c10::string_view'}], + torch._C._VariableFunctions.scatter_reduce: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'reduce', 'simple_type': 'c10::string_view'}], + torch._C._VariableFunctions.bitwise_and: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + 
torch._C._VariableFunctions.bitwise_and: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.bitwise_and: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.bitwise_and: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.bitwise_and: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.__and__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.__and__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.bitwise_or: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.bitwise_or: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.bitwise_or: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.bitwise_or: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.bitwise_or: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 
'Tensor'}], + torch._C._VariableFunctions.__or__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.__or__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.bitwise_xor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.bitwise_xor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.bitwise_xor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.bitwise_xor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.bitwise_xor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.__xor__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.__xor__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.__lshift__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.__lshift__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 
'simple_type': 'Tensor'}], + torch._C._VariableFunctions.bitwise_left_shift: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.bitwise_left_shift: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.bitwise_left_shift: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.bitwise_left_shift: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.bitwise_left_shift: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.__rshift__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.__rshift__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.bitwise_right_shift: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.bitwise_right_shift: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.bitwise_right_shift: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.bitwise_right_shift: [{'is_kwarg_only': 
'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.bitwise_right_shift: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.addbmm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.addbmm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.diag: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.diag: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.cross: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.cross: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.triu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.triu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.tril: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.tril: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.tril_indices: [{'is_kwarg_only': 'False', 'name': 'row', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'col', 'simple_type': 'int64_t'}], + 
torch._C._VariableFunctions.triu_indices: [{'is_kwarg_only': 'False', 'name': 'row', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'col', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.trace: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.ne: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.ne: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.ne: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.ne: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.not_equal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.not_equal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.not_equal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.not_equal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.eq: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.eq: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, 
{'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.eq: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.eq: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.ge: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.ge: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.ge: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.ge: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.greater_equal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.greater_equal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.greater_equal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.greater_equal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.le: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 
'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.le: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.le: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.le: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.less_equal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.less_equal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.less_equal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.less_equal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.gt: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.gt: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.gt: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.gt: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 
'Tensor'}], + torch._C._VariableFunctions.greater: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.greater: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.greater: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.greater: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.lt: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.lt: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.lt: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.lt: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.less: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.less: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.less: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.less: 
[{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.take: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.take: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.take_along_dim: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.take_along_dim: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.index_select: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.index_select: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.index_select: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.index_select: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.masked_select: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 
'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mask', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.masked_select: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mask', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.nonzero_static: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'size', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.nonzero_static: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'size', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.argwhere: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.gather: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.gather: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.gather: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.gather: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.addcmul: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tensor1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tensor2', 'simple_type': 'Tensor'}], + 
torch._C._VariableFunctions.addcmul: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tensor1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tensor2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.addcdiv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tensor1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tensor2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.addcdiv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tensor1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tensor2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.triangular_solve: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.triangular_solve: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._linalg_check_errors: [{'is_kwarg_only': 'False', 'name': 'info', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'api_name', 'simple_type': 'c10::string_view'}, {'is_kwarg_only': 'True', 'name': 'is_matrix', 'simple_type': 'bool'}], + torch._C._VariableFunctions.svd: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.svd: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.swapaxes: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'axis0', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'axis1', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.swapdims: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 
'False', 'name': 'dim0', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dim1', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.cholesky: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.cholesky: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.cholesky_solve: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.cholesky_solve: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.cholesky_inverse: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.cholesky_inverse: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.qr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.qr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.geqrf: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.geqrf: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.orgqr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.orgqr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.ormqr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input2', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input3', 'simple_type': 'Tensor'}], + 
torch._C._VariableFunctions.ormqr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input2', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input3', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._lu_with_info: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.lu_solve: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'LU_data', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'LU_pivots', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.lu_solve: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'LU_data', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'LU_pivots', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.lu_unpack: [{'is_kwarg_only': 'False', 'name': 'LU_data', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'LU_pivots', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.lu_unpack: [{'is_kwarg_only': 'False', 'name': 'LU_data', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'LU_pivots', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.multinomial: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'num_samples', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.multinomial: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'num_samples', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.lgamma: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.lgamma: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.digamma: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + 
torch._C._VariableFunctions.digamma: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.polygamma: [{'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.polygamma: [{'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.erfinv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.erfinv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.i0: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.i0: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.i0_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.sign: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.sign: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.signbit: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.signbit: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.dist: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.atan2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.atan2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + 
torch._C._VariableFunctions.arctan2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.arctan2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.lerp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.lerp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.lerp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.lerp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.histc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.histc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.histogram: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bins', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.histogram: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bins', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.histogram: [{'is_kwarg_only': 'False', 'name': 
'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.histogram: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._histogramdd_bin_edges: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bins', 'simple_type': 'IntArrayRef'}], + torch._C._VariableFunctions._histogramdd_from_bin_cts: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bins', 'simple_type': 'IntArrayRef'}], + torch._C._VariableFunctions._histogramdd_from_bin_tensors: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bins', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions.histogramdd: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bins', 'simple_type': 'IntArrayRef'}], + torch._C._VariableFunctions.histogramdd: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bins', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.histogramdd: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bins', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions.fmod: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.fmod: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.fmod: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.fmod: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 
'simple_type': 'Tensor'}], + torch._C._VariableFunctions.hypot: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.hypot: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.igamma: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.igamma: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.igammac: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.igammac: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.nextafter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.nextafter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.remainder: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.remainder: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.remainder: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 
'Tensor'}], + torch._C._VariableFunctions.remainder: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.remainder: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.fmin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.fmin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.fmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.fmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.maximum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.maximum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.minimum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.minimum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.quantile: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'q', 'simple_type': 'Tensor'}], + 
torch._C._VariableFunctions.quantile: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'q', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.quantile: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'q', 'simple_type': 'double'}], + torch._C._VariableFunctions.quantile: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'q', 'simple_type': 'double'}], + torch._C._VariableFunctions.nanquantile: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'q', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.nanquantile: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'q', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.nanquantile: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'q', 'simple_type': 'double'}], + torch._C._VariableFunctions.nanquantile: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'q', 'simple_type': 'double'}], + torch._C._VariableFunctions.sort: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.sort: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'stable', 'simple_type': 'bool?'}], + torch._C._VariableFunctions.sort: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.sort: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'stable', 'simple_type': 'bool?'}], + torch._C._VariableFunctions.sort: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + 
torch._C._VariableFunctions.sort: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'stable', 'simple_type': 'bool?'}, {'is_kwarg_only': 'True', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.sort: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.sort: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'stable', 'simple_type': 'bool?'}, {'is_kwarg_only': 'True', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.msort: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.msort: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.argsort: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.argsort: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'stable', 'simple_type': 'bool'}], + torch._C._VariableFunctions.argsort: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'stable', 'simple_type': 'bool'}], + torch._C._VariableFunctions.argsort: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.topk: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'k', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions.topk: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'k', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions.renorm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 
'False', 'name': 'p', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'maxnorm', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.renorm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'maxnorm', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.equal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.pow: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.pow: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.pow: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.pow: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.pow: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.pow: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.float_power: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.float_power: 
[{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.float_power: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.float_power: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.float_power: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.float_power: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.normal: [{'is_kwarg_only': 'False', 'name': 'mean', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.normal: [{'is_kwarg_only': 'False', 'name': 'mean', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.normal: [{'is_kwarg_only': 'False', 'name': 'mean', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'std', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.normal: [{'is_kwarg_only': 'False', 'name': 'mean', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'std', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.normal: [{'is_kwarg_only': 'False', 'name': 'mean', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'std', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.normal: [{'is_kwarg_only': 'False', 'name': 'mean', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'std', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.normal: [{'is_kwarg_only': 'False', 'name': 'mean', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'std', 'simple_type': 'double'}, 
{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions.normal: [{'is_kwarg_only': 'False', 'name': 'mean', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'std', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions._amp_foreach_non_finite_check_and_unscale_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'found_inf', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'inv_scale', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._amp_update_scale_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'growth_tracker', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'found_inf', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scale_growth_factor', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'scale_backoff_factor', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'growth_interval', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions._foreach_add: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalar', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions._foreach_add: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_add: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'ScalarList'}], + torch._C._VariableFunctions._foreach_add: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._foreach_add_: [{'is_kwarg_only': 'False', 
'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalar', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions._foreach_add_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_add_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'ScalarList'}], + torch._C._VariableFunctions._foreach_add_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._foreach_sub: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalar', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions._foreach_sub: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_sub: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'ScalarList'}], + torch._C._VariableFunctions._foreach_sub_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalar', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions._foreach_sub_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_sub_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'ScalarList'}], + torch._C._VariableFunctions._foreach_mul: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 
'False', 'name': 'scalar', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions._foreach_mul: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_mul: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'ScalarList'}], + torch._C._VariableFunctions._foreach_mul: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._foreach_mul_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalar', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions._foreach_mul_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_mul_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'ScalarList'}], + torch._C._VariableFunctions._foreach_mul_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._foreach_div: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalar', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions._foreach_div: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_div: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'ScalarList'}], + 
torch._C._VariableFunctions._foreach_div: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._foreach_div_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalar', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions._foreach_div_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_div_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'ScalarList'}], + torch._C._VariableFunctions._foreach_div_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._foreach_clamp_max: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalar', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions._foreach_clamp_max: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_clamp_max: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'ScalarList'}], + torch._C._VariableFunctions._foreach_clamp_max_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalar', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions._foreach_clamp_max_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'TensorList'}], + 
torch._C._VariableFunctions._foreach_clamp_max_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'ScalarList'}], + torch._C._VariableFunctions._foreach_clamp_min: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalar', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions._foreach_clamp_min: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_clamp_min: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'ScalarList'}], + torch._C._VariableFunctions._foreach_clamp_min_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalar', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions._foreach_clamp_min_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_clamp_min_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'ScalarList'}], + torch._C._VariableFunctions._foreach_maximum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalar', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions._foreach_maximum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_maximum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'ScalarList'}], + 
torch._C._VariableFunctions._foreach_maximum_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalar', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions._foreach_maximum_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_maximum_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'ScalarList'}], + torch._C._VariableFunctions._foreach_minimum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalar', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions._foreach_minimum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_minimum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'ScalarList'}], + torch._C._VariableFunctions._foreach_minimum_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalar', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions._foreach_minimum_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_minimum_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'ScalarList'}], + torch._C._VariableFunctions._foreach_addcdiv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor1', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 
'name': 'tensor2', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_addcdiv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor1', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor2', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'ScalarList'}], + torch._C._VariableFunctions._foreach_addcdiv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor1', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor2', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._foreach_addcdiv_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor1', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor2', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_addcdiv_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor1', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor2', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'ScalarList'}], + torch._C._VariableFunctions._foreach_addcdiv_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor1', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor2', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._foreach_addcmul: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor1', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor2', 'simple_type': 'TensorList'}], 
+ torch._C._VariableFunctions._foreach_addcmul: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor1', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor2', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'ScalarList'}], + torch._C._VariableFunctions._foreach_addcmul: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor1', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor2', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._foreach_addcmul_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor1', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor2', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_addcmul_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor1', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor2', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'ScalarList'}], + torch._C._VariableFunctions._foreach_addcmul_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor1', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor2', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._foreach_abs: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_abs_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_acos: [{'is_kwarg_only': 'False', 'name': 
'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_acos_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_asin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_asin_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_atan: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_atan_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_ceil: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_ceil_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_cos: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_cos_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_cosh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_cosh_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_erf: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_erf_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_erfc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_erfc_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_exp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + 
torch._C._VariableFunctions._foreach_exp_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_expm1: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_expm1_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_floor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_floor_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_frac: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_frac_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_lerp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensors1', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'weights', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_lerp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensors1', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions._foreach_lerp_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensors1', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'weights', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_lerp_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensors1', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions._foreach_lgamma: [{'is_kwarg_only': 
'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_lgamma_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_log: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_log_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_log10: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_log10_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_log1p: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_log1p_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_log2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_log2_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_max: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_neg: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_neg_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_pow: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_pow: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 
'Scalar'}], + torch._C._VariableFunctions._foreach_pow: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'ScalarList'}], + torch._C._VariableFunctions._foreach_pow: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_pow_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_pow_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions._foreach_pow_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'ScalarList'}], + torch._C._VariableFunctions._foreach_reciprocal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_reciprocal_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_round: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_round_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_sigmoid: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_sigmoid_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_sign: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_sign_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + 
torch._C._VariableFunctions._foreach_sin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_sin_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_sinh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_sinh_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_sqrt: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_sqrt_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_tan: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_tan_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_tanh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_tanh_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_trunc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_trunc_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_zero_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_copy_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions.bucketize: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'boundaries', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.bucketize: 
[{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'boundaries', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.bucketize: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'boundaries', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.searchsorted: [{'is_kwarg_only': 'False', 'name': 'sorted_sequence', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.searchsorted: [{'is_kwarg_only': 'False', 'name': 'sorted_sequence', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.searchsorted: [{'is_kwarg_only': 'False', 'name': 'sorted_sequence', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.searchsorted: [{'is_kwarg_only': 'False', 'name': 'sorted_sequence', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions._convert_indices_from_coo_to_csr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions._convert_indices_from_coo_to_csr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions._convert_indices_from_csr_to_coo: [{'is_kwarg_only': 'False', 'name': 'crow_indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'col_indices', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._convert_indices_from_csr_to_coo: [{'is_kwarg_only': 'False', 'name': 'crow_indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'col_indices', 'simple_type': 'Tensor'}], + 
torch._C._VariableFunctions.mkldnn_adaptive_avg_pool2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'IntArrayRef', 'size': 2}], + torch._C._VariableFunctions.mkldnn_adaptive_avg_pool2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'IntArrayRef', 'size': 2}], + torch._C._VariableFunctions._adaptive_avg_pool2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 2}], + torch._C._VariableFunctions._adaptive_avg_pool3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 3}], + torch._C._VariableFunctions.column_stack: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions.column_stack: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions.isfinite: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.isinf: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.isposinf: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.isposinf: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.isneginf: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.isneginf: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._add_batch_dim: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch_dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 
'False', 'name': 'level', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions._remove_batch_dim: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'level', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'batch_size', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'out_dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions._linalg_det: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._linalg_det: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.det: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._linalg_slogdet: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._linalg_slogdet: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.slogdet: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.slogdet: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.logdet: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._linalg_eigh: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._linalg_eigh: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.inverse: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.inverse: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.inner: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.inner: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 
'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.outer: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'vec2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.outer: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'vec2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.ger: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'vec2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.ger: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'vec2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._linalg_svd: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._linalg_svd: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._linalg_solve_ex: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'B', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._linalg_solve_ex: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'B', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._test_serialization_subcmul: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._test_parallel_materialize: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'num_parallel', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions._test_autograd_multiple_dispatch: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._test_autograd_multiple_dispatch: [{'is_kwarg_only': 
'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'b', 'simple_type': 'bool'}], + torch._C._VariableFunctions._test_autograd_multiple_dispatch_view: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._test_autograd_multiple_dispatch_view_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._test_autograd_multiple_dispatch_view_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.segment_reduce: [{'is_kwarg_only': 'False', 'name': 'data', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'reduce', 'simple_type': 'c10::string_view'}], + torch._C._VariableFunctions._nested_tensor_from_tensor_list: [{'is_kwarg_only': 'False', 'name': 'list', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._fw_primal_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'level', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions._fw_primal_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'level', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions._make_dual_copy: [{'is_kwarg_only': 'False', 'name': 'primal', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tangent', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'level', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions._make_dual_copy: [{'is_kwarg_only': 'False', 'name': 'primal', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tangent', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'level', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.view_as_real_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.view_as_real_copy: [{'is_kwarg_only': 'False', 'name': 'self', 
'simple_type': 'Tensor'}], + torch._C._VariableFunctions.view_as_complex_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.view_as_complex_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._conj_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._conj_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._neg_view_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._neg_view_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.as_strided_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions.as_strided_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions._sparse_broadcast_to_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'IntArrayRef'}], + torch._C._VariableFunctions._sparse_broadcast_to_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'IntArrayRef'}], + torch._C._VariableFunctions.diagonal_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.diagonal_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.expand_copy: [{'is_kwarg_only': 'False', 'name': 
'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions.expand_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions.permute_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dims', 'simple_type': 'IntArrayRef'}], + torch._C._VariableFunctions.permute_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dims', 'simple_type': 'IntArrayRef'}], + torch._C._VariableFunctions._reshape_alias_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions._reshape_alias_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions.select_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions.select_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions.detach_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.detach_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + 
torch._C._VariableFunctions.slice_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.slice_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.split_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'split_size', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions.split_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'split_size', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions.split_with_sizes_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'split_sizes', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions.split_with_sizes_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'split_sizes', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions.squeeze_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.squeeze_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.squeeze_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef'}], + torch._C._VariableFunctions.squeeze_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.squeeze_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.squeeze_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef'}], + 
torch._C._VariableFunctions.t_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.t_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.transpose_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim0', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dim1', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.transpose_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim0', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dim1', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.unsqueeze_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.unsqueeze_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions._indices_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._indices_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._values_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._values_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.indices_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.indices_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.values_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.values_copy: [{'is_kwarg_only': 'False', 'name': 'self', 
'simple_type': 'Tensor'}], + torch._C._VariableFunctions.crow_indices_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.crow_indices_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.col_indices_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.col_indices_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.ccol_indices_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.ccol_indices_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.row_indices_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.row_indices_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.unbind_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.unbind_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.view_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions.view_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dtype', 'simple_type': 'ScalarType'}], + torch._C._VariableFunctions.view_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions.view_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dtype', 'simple_type': 'ScalarType'}], + 
torch._C._VariableFunctions.unfold_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dimension', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'step', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.unfold_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dimension', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'step', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.alias_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.alias_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._nested_tensor_softmax_with_shape: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'query', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._safe_softmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions._transformer_encoder_layer_fwd: [{'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'embed_dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'num_heads', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'qkv_weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'qkv_bias', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'proj_weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'proj_bias', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'use_gelu', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'norm_first', 'simple_type': 'bool'}, 
{'is_kwarg_only': 'False', 'name': 'eps', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'norm_weight_1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'norm_bias_1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'norm_weight_2', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'norm_bias_2', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'ffn_weight_1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'ffn_bias_1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'ffn_weight_2', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'ffn_bias_2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._native_multi_head_attention: [{'is_kwarg_only': 'False', 'name': 'query', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'key', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'embed_dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'num_head', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'qkv_weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'qkv_bias', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'proj_weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'proj_bias', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._fused_sdp_choice: [{'is_kwarg_only': 'False', 'name': 'query', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'key', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._scaled_dot_product_attention_math: [{'is_kwarg_only': 'False', 'name': 'query', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'key', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Tensor'}], + 
torch._C._VariableFunctions._scaled_dot_product_attention_math_for_mps: [{'is_kwarg_only': 'False', 'name': 'query', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'key', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._scaled_dot_product_flash_attention: [{'is_kwarg_only': 'False', 'name': 'query', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'key', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._scaled_dot_product_flash_attention_for_cpu: [{'is_kwarg_only': 'False', 'name': 'query', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'key', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._scaled_dot_product_efficient_attention: [{'is_kwarg_only': 'False', 'name': 'query', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'key', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'attn_bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'compute_log_sumexp', 'simple_type': 'bool'}], + torch._C._VariableFunctions._scaled_dot_product_cudnn_attention: [{'is_kwarg_only': 'False', 'name': 'query', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'key', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'attn_bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'compute_log_sumexp', 'simple_type': 'bool'}], + torch._C._VariableFunctions._triton_scaled_dot_attention: [{'is_kwarg_only': 'False', 'name': 'q', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'k', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'v', 'simple_type': 'Tensor'}], + 
torch._C._VariableFunctions._fill_mem_eff_dropout_mask_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dropout_p', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'seed', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'offset', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions._triton_multi_head_attention: [{'is_kwarg_only': 'False', 'name': 'query', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'key', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'embed_dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'num_head', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'qkv_weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'qkv_bias', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'proj_weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'proj_bias', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._foobar: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._fused_adam_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'grads', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'exp_avgs', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'exp_avg_sqs', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'max_exp_avg_sqs', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'state_steps', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'True', 'name': 'lr', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'beta1', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'beta2', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'weight_decay', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 
'eps', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'amsgrad', 'simple_type': 'bool'}, {'is_kwarg_only': 'True', 'name': 'maximize', 'simple_type': 'bool'}], + torch._C._VariableFunctions._fused_adam_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'grads', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'exp_avgs', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'exp_avg_sqs', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'max_exp_avg_sqs', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'state_steps', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'True', 'name': 'lr', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'beta1', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'beta2', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'weight_decay', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'eps', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'amsgrad', 'simple_type': 'bool'}, {'is_kwarg_only': 'True', 'name': 'maximize', 'simple_type': 'bool'}], + torch._C._VariableFunctions._fused_adamw_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'grads', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'exp_avgs', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'exp_avg_sqs', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'max_exp_avg_sqs', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'state_steps', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'True', 'name': 'lr', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'beta1', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'beta2', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'weight_decay', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 
'name': 'eps', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'amsgrad', 'simple_type': 'bool'}, {'is_kwarg_only': 'True', 'name': 'maximize', 'simple_type': 'bool'}], + torch._C._VariableFunctions._fused_adamw_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'grads', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'exp_avgs', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'exp_avg_sqs', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'max_exp_avg_sqs', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'state_steps', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'True', 'name': 'lr', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'beta1', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'beta2', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'weight_decay', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'eps', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'amsgrad', 'simple_type': 'bool'}, {'is_kwarg_only': 'True', 'name': 'maximize', 'simple_type': 'bool'}], + torch._C._VariableFunctions._fused_sgd_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'grads', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'momentum_buffer_list', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'True', 'name': 'weight_decay', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'momentum', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'lr', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'dampening', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'nesterov', 'simple_type': 'bool'}, {'is_kwarg_only': 'True', 'name': 'maximize', 'simple_type': 'bool'}, {'is_kwarg_only': 'True', 'name': 'is_first_step', 'simple_type': 'bool'}], + 
torch._C._VariableFunctions._fused_sgd_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'grads', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'momentum_buffer_list', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'True', 'name': 'weight_decay', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'momentum', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'lr', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'dampening', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'nesterov', 'simple_type': 'bool'}, {'is_kwarg_only': 'True', 'name': 'maximize', 'simple_type': 'bool'}, {'is_kwarg_only': 'True', 'name': 'is_first_step', 'simple_type': 'bool'}], + torch._C._VariableFunctions._fused_adagrad_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'grads', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'state_sums', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'state_steps', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'True', 'name': 'lr', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'lr_decay', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'weight_decay', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'eps', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'maximize', 'simple_type': 'bool'}], + torch._C._VariableFunctions._propagate_xla_data: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output', 'simple_type': 'Tensor'}], + torch._C._nn.binary_cross_entropy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}], + torch._C._nn.binary_cross_entropy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 
'name': 'target', 'simple_type': 'Tensor'}], + torch._C._nn.linear: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}], + torch._C._nn.linear: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}], + torch._C._nn.mkldnn_linear: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}], + torch._C._nn.relu6: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.relu6_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.gelu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.gelu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.gelu_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.silu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.silu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.silu_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.mish: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.mish: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.mish_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.one_hot: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.mkldnn_reorder_conv2d_weight: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.mkldnn_reorder_conv3d_weight: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.cross_entropy_loss: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, 
{'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}], + torch._C._nn.mse_loss: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}], + torch._C._nn.mse_loss: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}], + torch._C._nn.l1_loss: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}], + torch._C._nn.multi_margin_loss: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}], + torch._C._nn.multi_margin_loss: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}], + torch._C._nn.multilabel_margin_loss: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}], + torch._C._nn.multilabel_margin_loss: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}], + torch._C._nn.nll_loss: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}], + torch._C._nn.nll_loss: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}], + torch._C._nn.nll_loss_nd: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}], + torch._C._nn.nll_loss2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}], + torch._C._nn.nll_loss2d: 
[{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}], + torch._C._nn.smooth_l1_loss: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}], + torch._C._nn.smooth_l1_loss: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}], + torch._C._nn.huber_loss: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}], + torch._C._nn.huber_loss: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}], + torch._C._nn.soft_margin_loss: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}], + torch._C._nn.soft_margin_loss: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}], + torch._C._nn.elu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.elu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.elu_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.glu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.glu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.hardsigmoid: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.hardsigmoid: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.hardsigmoid_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.hardtanh: [{'is_kwarg_only': 'False', 'name': 'self', 
'simple_type': 'Tensor'}], + torch._C._nn.hardtanh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.hardtanh_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.hardswish: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.hardswish: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.hardswish_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.leaky_relu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.leaky_relu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.leaky_relu_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.log_sigmoid: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.log_sigmoid: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.rrelu_with_noise: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'noise', 'simple_type': 'Tensor'}], + torch._C._nn.rrelu_with_noise: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'noise', 'simple_type': 'Tensor'}], + torch._C._nn.rrelu_with_noise_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'noise', 'simple_type': 'Tensor'}], + torch._C._nn.softplus: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.softplus: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.softshrink: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.softshrink: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.adaptive_avg_pool2d: [{'is_kwarg_only': 'False', 'name': 'self', 
'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 2}], + torch._C._nn.adaptive_avg_pool2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 2}], + torch._C._nn.adaptive_avg_pool3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 3}], + torch._C._nn.adaptive_avg_pool3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 3}], + torch._C._nn.adaptive_max_pool2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'IntArrayRef', 'size': 2}], + torch._C._nn.adaptive_max_pool2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'IntArrayRef', 'size': 2}], + torch._C._nn.adaptive_max_pool3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'IntArrayRef', 'size': 3}], + torch._C._nn.adaptive_max_pool3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'IntArrayRef', 'size': 3}], + torch._C._nn.avg_pool2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 2}], + torch._C._nn.avg_pool2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 2}], + torch._C._nn.avg_pool3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 
'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 3}], + torch._C._nn.avg_pool3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 3}], + torch._C._nn.fractional_max_pool2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'IntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'random_samples', 'simple_type': 'Tensor'}], + torch._C._nn.fractional_max_pool2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'IntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'random_samples', 'simple_type': 'Tensor'}], + torch._C._nn.fractional_max_pool3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 3}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'IntArrayRef', 'size': 3}, {'is_kwarg_only': 'False', 'name': 'random_samples', 'simple_type': 'Tensor'}], + torch._C._nn.fractional_max_pool3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 3}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'IntArrayRef', 'size': 3}, {'is_kwarg_only': 'False', 'name': 'random_samples', 'simple_type': 'Tensor'}], + torch._C._nn.max_pool2d_with_indices: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 2}], + 
torch._C._nn.max_pool2d_with_indices: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 2}], + torch._C._nn.max_pool3d_with_indices: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 3}], + torch._C._nn.max_pool3d_with_indices: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 3}], + torch._C._nn.max_unpool2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 2}], + torch._C._nn.max_unpool2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 2}], + torch._C._nn.max_unpool3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 3}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'IntArrayRef', 'size': 3}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'IntArrayRef', 'size': 3}], + torch._C._nn.max_unpool3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 3}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'IntArrayRef', 'size': 3}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 
'IntArrayRef', 'size': 3}], + torch._C._nn.reflection_pad1d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef', 'size': 2}], + torch._C._nn.reflection_pad1d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef', 'size': 2}], + torch._C._nn.reflection_pad2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef', 'size': 4}], + torch._C._nn.reflection_pad2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef', 'size': 4}], + torch._C._nn.reflection_pad3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef', 'size': 6}], + torch._C._nn.reflection_pad3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef', 'size': 6}], + torch._C._nn.replication_pad1d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef', 'size': 2}], + torch._C._nn.replication_pad1d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef', 'size': 2}], + torch._C._nn.replication_pad2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef', 'size': 4}], + torch._C._nn.replication_pad2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef', 'size': 4}], + 
torch._C._nn.replication_pad3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef', 'size': 6}], + torch._C._nn.replication_pad3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef', 'size': 6}], + torch._C._nn._pad_circular: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'pad', 'simple_type': 'SymIntArrayRef'}], + torch._C._nn._pad_enum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'pad', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'mode', 'simple_type': 'int64_t'}], + torch._C._nn.pad: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'pad', 'simple_type': 'SymIntArrayRef'}], + torch._C._nn.upsample_linear1d: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef?'}, {'is_kwarg_only': 'False', 'name': 'align_corners', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'scale_factors', 'simple_type': 'ArrayRef?'}], + torch._C._nn.upsample_linear1d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 1}, {'is_kwarg_only': 'False', 'name': 'align_corners', 'simple_type': 'bool'}], + torch._C._nn.upsample_linear1d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 1}, {'is_kwarg_only': 'False', 'name': 'align_corners', 'simple_type': 'bool'}], + torch._C._nn.upsample_bilinear2d: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, 
{'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef?'}, {'is_kwarg_only': 'False', 'name': 'align_corners', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'scale_factors', 'simple_type': 'ArrayRef?'}], + torch._C._nn.upsample_bilinear2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'align_corners', 'simple_type': 'bool'}], + torch._C._nn.upsample_bilinear2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'align_corners', 'simple_type': 'bool'}], + torch._C._nn._upsample_bilinear2d_aa: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef?'}, {'is_kwarg_only': 'False', 'name': 'align_corners', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'scale_factors', 'simple_type': 'ArrayRef?'}], + torch._C._nn._upsample_bilinear2d_aa: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'align_corners', 'simple_type': 'bool'}], + torch._C._nn._upsample_bilinear2d_aa: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'align_corners', 'simple_type': 'bool'}], + torch._C._nn.upsample_trilinear3d: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef?'}, {'is_kwarg_only': 'False', 'name': 'align_corners', 'simple_type': 'bool'}, 
{'is_kwarg_only': 'False', 'name': 'scale_factors', 'simple_type': 'ArrayRef?'}], + torch._C._nn.upsample_trilinear3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 3}, {'is_kwarg_only': 'False', 'name': 'align_corners', 'simple_type': 'bool'}], + torch._C._nn.upsample_trilinear3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 3}, {'is_kwarg_only': 'False', 'name': 'align_corners', 'simple_type': 'bool'}], + torch._C._nn.upsample_bicubic2d: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef?'}, {'is_kwarg_only': 'False', 'name': 'align_corners', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'scale_factors', 'simple_type': 'ArrayRef?'}], + torch._C._nn.upsample_bicubic2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'align_corners', 'simple_type': 'bool'}], + torch._C._nn.upsample_bicubic2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'align_corners', 'simple_type': 'bool'}], + torch._C._nn._upsample_bicubic2d_aa: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef?'}, {'is_kwarg_only': 'False', 'name': 'align_corners', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'scale_factors', 'simple_type': 'ArrayRef?'}], + torch._C._nn._upsample_bicubic2d_aa: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 
'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'align_corners', 'simple_type': 'bool'}], + torch._C._nn._upsample_bicubic2d_aa: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'align_corners', 'simple_type': 'bool'}], + torch._C._nn.upsample_nearest1d: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef?'}, {'is_kwarg_only': 'False', 'name': 'scale_factors', 'simple_type': 'ArrayRef?'}], + torch._C._nn.upsample_nearest1d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 1}], + torch._C._nn.upsample_nearest1d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 1}], + torch._C._nn._upsample_nearest_exact1d: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef?'}, {'is_kwarg_only': 'False', 'name': 'scale_factors', 'simple_type': 'ArrayRef?'}], + torch._C._nn._upsample_nearest_exact1d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 1}], + torch._C._nn._upsample_nearest_exact1d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 1}], + torch._C._nn.upsample_nearest2d: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 
'simple_type': 'SymIntArrayRef?'}, {'is_kwarg_only': 'False', 'name': 'scale_factors', 'simple_type': 'ArrayRef?'}], + torch._C._nn.upsample_nearest2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 2}], + torch._C._nn.upsample_nearest2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 2}], + torch._C._nn._upsample_nearest_exact2d: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef?'}, {'is_kwarg_only': 'False', 'name': 'scale_factors', 'simple_type': 'ArrayRef?'}], + torch._C._nn._upsample_nearest_exact2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 2}], + torch._C._nn._upsample_nearest_exact2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 2}], + torch._C._nn.upsample_nearest3d: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef?'}, {'is_kwarg_only': 'False', 'name': 'scale_factors', 'simple_type': 'ArrayRef?'}], + torch._C._nn.upsample_nearest3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 3}], + torch._C._nn.upsample_nearest3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 3}], + torch._C._nn._upsample_nearest_exact3d: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 
'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef?'}, {'is_kwarg_only': 'False', 'name': 'scale_factors', 'simple_type': 'ArrayRef?'}], + torch._C._nn._upsample_nearest_exact3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 3}], + torch._C._nn._upsample_nearest_exact3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 3}], + torch._C._nn.slow_conv_transpose2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'SymIntArrayRef', 'size': 2}], + torch._C._nn.slow_conv_transpose2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'SymIntArrayRef', 'size': 2}], + torch._C._nn.slow_conv_transpose3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'SymIntArrayRef', 'size': 3}], + torch._C._nn.slow_conv_transpose3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'SymIntArrayRef', 'size': 3}], + torch._C._nn.thnn_conv2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'SymIntArrayRef', 'size': 2}], + torch._C._nn.thnn_conv2d: 
[{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'SymIntArrayRef', 'size': 2}], + torch._C._nn._conv_depthwise2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'SymIntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'SymIntArrayRef', 'size': 2}], + torch._C._nn._conv_depthwise2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'SymIntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'SymIntArrayRef', 'size': 2}], + torch._C._nn.conv_depthwise3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'SymIntArrayRef', 'size': 3}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef', 'size': 3}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef', 'size': 3}, 
{'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'SymIntArrayRef', 'size': 3}], + torch._C._nn.slow_conv3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'SymIntArrayRef', 'size': 3}], + torch._C._nn.slow_conv3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'SymIntArrayRef', 'size': 3}], + torch._C._nn.slow_conv_dilated2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'SymIntArrayRef', 'size': 2}], + torch._C._nn.slow_conv_dilated3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'SymIntArrayRef', 'size': 3}], + torch._C._nn.col2im: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'IntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'IntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'IntArrayRef', 'size': 2}], + torch._C._nn.col2im: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 2}, 
{'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'IntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'IntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'IntArrayRef', 'size': 2}], + torch._C._nn.im2col: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'IntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'IntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'IntArrayRef', 'size': 2}], + torch._C._nn.im2col: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'IntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'IntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'IntArrayRef', 'size': 2}], + torch._C._nn._test_optional_intlist: [{'is_kwarg_only': 'False', 'name': 'values', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'addends', 'simple_type': 'IntArrayRef?'}], + torch._C._nn._test_optional_filled_intlist: [{'is_kwarg_only': 'False', 'name': 'values', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'addends', 'simple_type': 'IntArrayRef?', 'size': 2}], + torch._C._nn._test_optional_floatlist: [{'is_kwarg_only': 'False', 'name': 'values', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'addends', 'simple_type': 'ArrayRef?'}], + torch._C._nn._test_string_default: [{'is_kwarg_only': 'False', 'name': 'dummy', 'simple_type': 'Tensor'}], + torch._C._nn._test_ambiguous_defaults: [{'is_kwarg_only': 'False', 'name': 'dummy', 'simple_type': 'Tensor'}], + 
torch._C._nn._test_ambiguous_defaults: [{'is_kwarg_only': 'False', 'name': 'dummy', 'simple_type': 'Tensor'}], + torch._C._nn._test_warn_in_autograd: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.pad_sequence: [{'is_kwarg_only': 'False', 'name': 'sequences', 'simple_type': 'TensorList'}], + torch._C._nn.flatten_dense_tensors: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}], + torch._C._nn.unflatten_dense_tensors: [{'is_kwarg_only': 'False', 'name': 'flat', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}], + torch._C._nn.scaled_dot_product_attention: [{'is_kwarg_only': 'False', 'name': 'query', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'key', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_diagonal: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_solve_triangular: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'B', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'upper', 'simple_type': 'bool'}], + torch._C._linalg.linalg_solve_triangular: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'B', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'upper', 'simple_type': 'bool'}], + torch._C._linalg.linalg_vander: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_cholesky_ex: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_cholesky_ex: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_cholesky: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_cholesky: [{'is_kwarg_only': 'False', 'name': 'self', 
'simple_type': 'Tensor'}], + torch._C._linalg.linalg_cross: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_cross: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_lu_factor: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_lu_factor: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_lu_factor_ex: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_lu_factor_ex: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_lu: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_lu: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_lu_solve: [{'is_kwarg_only': 'False', 'name': 'LU', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'pivots', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'B', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_lu_solve: [{'is_kwarg_only': 'False', 'name': 'LU', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'pivots', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'B', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_det: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_det: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_ldl_factor_ex: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_ldl_factor_ex: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_ldl_factor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 
'Tensor'}], + torch._C._linalg.linalg_ldl_factor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_ldl_solve: [{'is_kwarg_only': 'False', 'name': 'LD', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'pivots', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'B', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_ldl_solve: [{'is_kwarg_only': 'False', 'name': 'LD', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'pivots', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'B', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_lstsq: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'b', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_lstsq: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'b', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_matmul: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_matmul: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_vecdot: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'y', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_vecdot: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'y', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_matrix_exp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_slogdet: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_slogdet: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_eig: [{'is_kwarg_only': 
'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_eig: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._linalg._linalg_eigvals: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_eigvals: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_eigvals: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_eigh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_eigh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_eigvalsh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_eigvalsh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_householder_product: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tau', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_householder_product: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tau', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_inv_ex: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_inv_ex: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_inv: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_inv: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'ord', 'simple_type': 'c10::string_view'}], + torch._C._linalg.linalg_norm: 
[{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'ord', 'simple_type': 'c10::string_view'}], + torch._C._linalg.linalg_vector_norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_vector_norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_matrix_norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'ord', 'simple_type': 'Scalar'}], + torch._C._linalg.linalg_matrix_norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'ord', 'simple_type': 'Scalar'}], + torch._C._linalg.linalg_matrix_norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_matrix_norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_svd: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_svd: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_svdvals: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_svdvals: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_cond: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_cond: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_cond: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'c10::string_view'}], + torch._C._linalg.linalg_cond: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 
'c10::string_view'}], + torch._C._linalg.linalg_pinv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_pinv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_pinv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_pinv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_pinv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'rcond', 'simple_type': 'double'}], + torch._C._linalg.linalg_pinv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'rcond', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_pinv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'rcond', 'simple_type': 'double'}], + torch._C._linalg.linalg_pinv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'rcond', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_solve_ex: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'B', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_solve_ex: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'B', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_solve: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'B', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_solve: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'B', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_tensorinv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_tensorinv: [{'is_kwarg_only': 'False', 'name': 'self', 
'simple_type': 'Tensor'}], + torch._C._linalg.linalg_tensorsolve: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_tensorsolve: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_qr: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_qr: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_matrix_power: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'int64_t'}], + torch._C._linalg.linalg_matrix_power: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'int64_t'}], + torch._C._linalg.linalg_matrix_rank: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_matrix_rank: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_matrix_rank: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_matrix_rank: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_matrix_rank: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tol', 'simple_type': 'double'}], + torch._C._linalg.linalg_matrix_rank: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tol', 'simple_type': 'double'}], + torch._C._linalg.linalg_matrix_rank: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tol', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_matrix_rank: [{'is_kwarg_only': 'False', 'name': 'input', 
'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tol', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_multi_dot: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}], + torch._C._linalg.linalg_multi_dot: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}], + torch._C._special.special_entr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_entr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_ndtri: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_ndtri: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_log_ndtr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_log_ndtr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_expm1: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_expm1: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_exp2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_exp2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_psi: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_psi: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_digamma: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_digamma: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_gammaln: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_gammaln: [{'is_kwarg_only': 'False', 'name': 'self', 
'simple_type': 'Tensor'}], + torch._C._special.special_erf: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_erf: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_erfc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_erfc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_erfcx: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_erfcx: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_erfinv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_erfinv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_ndtr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_ndtr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_xlog1py: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._special.special_xlog1py: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._special.special_xlog1py: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._special.special_xlog1py: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._special.special_xlog1py: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + 
torch._C._special.special_xlog1py: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._special.special_xlogy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._special.special_xlogy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._special.special_xlogy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._special.special_xlogy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._special.special_xlogy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._special.special_xlogy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._special.special_zeta: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._special.special_zeta: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._special.special_zeta: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._special.special_zeta: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._special.special_zeta: [{'is_kwarg_only': 'False', 
'name': 'self', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._special.special_zeta: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._special.special_i0: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_i0: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_i0e: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_i0e: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_i1: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_i1: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_i1e: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_i1e: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_logit: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_logit: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_polygamma: [{'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_polygamma: [{'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_logsumexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef', 'size': 1}], + torch._C._special.special_logsumexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, 
{'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef', 'size': 1}], + torch._C._special.special_expit: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_expit: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_sinc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_sinc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_round: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_round: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_log1p: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_log1p: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_log_softmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._special.special_gammainc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._special.special_gammainc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._special.special_gammaincc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._special.special_gammaincc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._special.special_multigammaln: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 
'int64_t'}], + torch._C._special.special_multigammaln: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'int64_t'}], + torch._C._special.special_softmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._special.special_airy_ai: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}], + torch._C._special.special_airy_ai: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}], + torch._C._special.special_bessel_j0: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_bessel_j0: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_bessel_j1: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_bessel_j1: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_bessel_y0: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_bessel_y0: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_bessel_y1: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_bessel_y1: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_chebyshev_polynomial_t: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_chebyshev_polynomial_t: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_chebyshev_polynomial_t: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 
'simple_type': 'Scalar'}], + torch._C._special.special_chebyshev_polynomial_t: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_chebyshev_polynomial_t: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_chebyshev_polynomial_t: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}], + torch._C._special.special_chebyshev_polynomial_u: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_chebyshev_polynomial_u: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_chebyshev_polynomial_u: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}], + torch._C._special.special_chebyshev_polynomial_u: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_chebyshev_polynomial_u: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_chebyshev_polynomial_u: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}], + torch._C._special.special_chebyshev_polynomial_v: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_chebyshev_polynomial_v: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 
'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_chebyshev_polynomial_v: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}], + torch._C._special.special_chebyshev_polynomial_v: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_chebyshev_polynomial_v: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_chebyshev_polynomial_v: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}], + torch._C._special.special_chebyshev_polynomial_w: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_chebyshev_polynomial_w: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_chebyshev_polynomial_w: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}], + torch._C._special.special_chebyshev_polynomial_w: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_chebyshev_polynomial_w: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_chebyshev_polynomial_w: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}], + torch._C._special.special_hermite_polynomial_h: 
[{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_hermite_polynomial_h: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_hermite_polynomial_h: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}], + torch._C._special.special_hermite_polynomial_h: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_hermite_polynomial_h: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_hermite_polynomial_h: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}], + torch._C._special.special_hermite_polynomial_he: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_hermite_polynomial_he: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_hermite_polynomial_he: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}], + torch._C._special.special_hermite_polynomial_he: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_hermite_polynomial_he: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + 
torch._C._special.special_hermite_polynomial_he: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}], + torch._C._special.special_laguerre_polynomial_l: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_laguerre_polynomial_l: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_laguerre_polynomial_l: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}], + torch._C._special.special_laguerre_polynomial_l: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_laguerre_polynomial_l: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_laguerre_polynomial_l: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}], + torch._C._special.special_legendre_polynomial_p: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_legendre_polynomial_p: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_legendre_polynomial_p: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}], + torch._C._special.special_legendre_polynomial_p: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 
'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_legendre_polynomial_p: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_legendre_polynomial_p: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}], + torch._C._special.special_modified_bessel_i0: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_modified_bessel_i0: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_modified_bessel_i1: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_modified_bessel_i1: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_modified_bessel_k0: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_modified_bessel_k0: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_modified_bessel_k1: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_modified_bessel_k1: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_scaled_modified_bessel_k0: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}], + torch._C._special.special_scaled_modified_bessel_k0: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}], + torch._C._special.special_scaled_modified_bessel_k1: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}], + torch._C._special.special_scaled_modified_bessel_k1: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}], + torch._C._special.special_shifted_chebyshev_polynomial_t: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 
'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_shifted_chebyshev_polynomial_t: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_shifted_chebyshev_polynomial_t: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}], + torch._C._special.special_shifted_chebyshev_polynomial_t: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_shifted_chebyshev_polynomial_t: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_shifted_chebyshev_polynomial_t: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}], + torch._C._special.special_shifted_chebyshev_polynomial_u: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_shifted_chebyshev_polynomial_u: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_shifted_chebyshev_polynomial_u: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}], + torch._C._special.special_shifted_chebyshev_polynomial_u: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_shifted_chebyshev_polynomial_u: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + 
torch._C._special.special_shifted_chebyshev_polynomial_u: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}], + torch._C._special.special_shifted_chebyshev_polynomial_v: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_shifted_chebyshev_polynomial_v: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_shifted_chebyshev_polynomial_v: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}], + torch._C._special.special_shifted_chebyshev_polynomial_v: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_shifted_chebyshev_polynomial_v: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_shifted_chebyshev_polynomial_v: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}], + torch._C._special.special_shifted_chebyshev_polynomial_w: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_shifted_chebyshev_polynomial_w: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_shifted_chebyshev_polynomial_w: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}], + torch._C._special.special_shifted_chebyshev_polynomial_w: 
[{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_shifted_chebyshev_polynomial_w: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_shifted_chebyshev_polynomial_w: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}], + torch._C._special.special_spherical_bessel_j0: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}], + torch._C._special.special_spherical_bessel_j0: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}], + torch._C._fft.fft_fft: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_fft: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_ifft: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_ifft: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_rfft: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_rfft: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_irfft: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_irfft: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_hfft: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_hfft: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_ihfft: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_ihfft: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_fft2: [{'is_kwarg_only': 'False', 'name': 'self', 
'simple_type': 'Tensor'}], + torch._C._fft.fft_fft2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_ifft2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_ifft2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_rfft2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_rfft2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_irfft2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_irfft2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_hfft2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_hfft2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_ihfft2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_ihfft2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_fftn: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_fftn: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_ifftn: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_ifftn: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_rfftn: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_rfftn: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_irfftn: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_irfftn: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_hfftn: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + 
torch._C._fft.fft_hfftn: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_ihfftn: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_ihfftn: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_fftfreq: [{'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'int64_t'}], + torch._C._fft.fft_fftfreq: [{'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'int64_t'}], + torch._C._fft.fft_rfftfreq: [{'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'int64_t'}], + torch._C._fft.fft_rfftfreq: [{'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'int64_t'}], + torch._C._fft.fft_fftshift: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_ifftshift: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.retain_grad: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.rename_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'names', 'simple_type': 'DimnameList?'}], + torch.Tensor.rename: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'names', 'simple_type': 'DimnameList?'}], + torch.Tensor.align_to: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'names', 'simple_type': 'DimnameList'}], + torch.Tensor.align_to: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'order', 'simple_type': 'DimnameList'}, {'is_kwarg_only': 'False', 'name': 'ellipsis_idx', 'simple_type': 'int64_t'}], + torch.Tensor.align_as: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.refine_names: [{'is_kwarg_only': 'False', 'name': 'self', 
'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'names', 'simple_type': 'DimnameList'}], + torch.Tensor.abs: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.abs_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.absolute: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.absolute_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.angle: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.sgn: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.sgn_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.chalf: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor._conj: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.conj: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor._conj_physical: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.conj_physical: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.conj_physical_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.resolve_conj: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.resolve_neg: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor._neg_view: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.acos: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.acos_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.arccos: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.arccos_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 
'Tensor'}], + torch.Tensor.add: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.add_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.addmv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'vec', 'simple_type': 'Tensor'}], + torch.Tensor.addmv_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'vec', 'simple_type': 'Tensor'}], + torch.Tensor.addr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'vec1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'vec2', 'simple_type': 'Tensor'}], + torch.Tensor.addr_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'vec1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'vec2', 'simple_type': 'Tensor'}], + torch.Tensor._is_all_true: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor._is_any_true: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.all: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch.Tensor.all: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.all: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch.Tensor.all: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.allclose: 
[{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.any: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch.Tensor.any: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.any: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch.Tensor.any: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.argmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.argmin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.acosh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.acosh_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.arccosh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.arccosh_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.asinh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.asinh_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.arcsinh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.arcsinh_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.atanh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.atanh_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.arctanh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.arctanh_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.as_strided: 
[{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}], + torch.Tensor.as_strided_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}], + torch.Tensor.asin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.asin_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.arcsin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.arcsin_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.atan: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.atan_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.arctan: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.arctan_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.baddbmm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch2', 'simple_type': 'Tensor'}], + torch.Tensor.baddbmm_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch2', 'simple_type': 'Tensor'}], + torch.Tensor.bernoulli: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.bernoulli: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'double'}], + torch.Tensor.bernoulli_: 
[{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'Tensor'}], + torch.Tensor.bernoulli_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.bincount: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.bitwise_not: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.bitwise_not_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.copysign: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.copysign: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.copysign_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.copysign_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor._lazy_clone: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.logical_not: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.logical_not_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.logical_xor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.logical_xor_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.logical_and: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 
'Tensor'}], + torch.Tensor.logical_and_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.logical_or: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.logical_or_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.bmm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}], + torch.Tensor.broadcast_to: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}], + torch.Tensor.ceil: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.ceil_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.unsafe_chunk: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'chunks', 'simple_type': 'int64_t'}], + torch.Tensor.chunk: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'chunks', 'simple_type': 'int64_t'}], + torch.Tensor.tensor_split: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'sections', 'simple_type': 'SymInt'}], + torch.Tensor.tensor_split: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'SymIntArrayRef'}], + torch.Tensor.tensor_split: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tensor_indices_or_sections', 'simple_type': 'Tensor'}], + torch.Tensor.clamp: [{'is_kwarg_only': 'False', 'name': 'self', 
'simple_type': 'Tensor'}], + torch.Tensor.clamp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.clamp_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.clamp_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.clamp_max: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'max', 'simple_type': 'Scalar'}], + torch.Tensor.clamp_max: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'max', 'simple_type': 'Tensor'}], + torch.Tensor.clamp_max_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'max', 'simple_type': 'Scalar'}], + torch.Tensor.clamp_max_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'max', 'simple_type': 'Tensor'}], + torch.Tensor.clamp_min: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'min', 'simple_type': 'Scalar'}], + torch.Tensor.clamp_min: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'min', 'simple_type': 'Tensor'}], + torch.Tensor.clamp_min_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'min', 'simple_type': 'Scalar'}], + torch.Tensor.clamp_min_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'min', 'simple_type': 'Tensor'}], + torch.Tensor.clip: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.clip: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.clip_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.clip_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 
'Tensor'}], + torch.Tensor.cos: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.cos_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.cosh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.cosh_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.count_nonzero: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef'}], + torch.Tensor.count_nonzero: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.cov: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.corrcoef: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.cummax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch.Tensor.cummax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch.Tensor.cummin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch.Tensor.cummin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch.Tensor.cumprod: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch.Tensor.cumprod: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch.Tensor.cumprod_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + 
torch.Tensor.cumprod_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch.Tensor.cumsum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch.Tensor.cumsum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch.Tensor.cumsum_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch.Tensor.cumsum_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch.Tensor.diag_embed: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.diagflat: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.diagonal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.diagonal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.fill_diagonal_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'fill_value', 'simple_type': 'Scalar'}], + torch.Tensor.diff: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.div: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.div: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'rounding_mode', 'simple_type': 'c10::string_view?'}], + torch.Tensor.div: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 
'name': 'other', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'True', 'name': 'rounding_mode', 'simple_type': 'c10::string_view?'}], + torch.Tensor.div_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.div_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'rounding_mode', 'simple_type': 'c10::string_view?'}], + torch.Tensor.div_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'True', 'name': 'rounding_mode', 'simple_type': 'c10::string_view?'}], + torch.Tensor.divide: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.divide: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.divide: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'rounding_mode', 'simple_type': 'c10::string_view?'}], + torch.Tensor.divide: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'True', 'name': 'rounding_mode', 'simple_type': 'c10::string_view?'}], + torch.Tensor.divide_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.divide_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.divide_: [{'is_kwarg_only': 
'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'rounding_mode', 'simple_type': 'c10::string_view?'}], + torch.Tensor.divide_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'True', 'name': 'rounding_mode', 'simple_type': 'c10::string_view?'}], + torch.Tensor.true_divide: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.true_divide: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.true_divide_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.true_divide_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.dot: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tensor', 'simple_type': 'Tensor'}], + torch.Tensor.vdot: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.new_empty: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}], + torch.Tensor.new_empty_strided: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}], + torch.Tensor.new_full: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, 
{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'fill_value', 'simple_type': 'Scalar'}], + torch.Tensor.new_zeros: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}], + torch.Tensor.new_ones: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}], + torch.Tensor.resize_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}], + torch.Tensor.erf: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.erf_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.erfc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.erfc_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.exp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.exp_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.exp2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.exp2_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.expm1: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.expm1_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.expand: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}], + torch.Tensor.expand_as: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.flatten: [{'is_kwarg_only': 'False', 'name': 
'self', 'simple_type': 'Tensor'}], + torch.Tensor.flatten: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'start_dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'end_dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'out_dim', 'simple_type': 'Dimname'}], + torch.Tensor.flatten: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'start_dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'end_dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'out_dim', 'simple_type': 'Dimname'}], + torch.Tensor.flatten: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dims', 'simple_type': 'DimnameList'}, {'is_kwarg_only': 'False', 'name': 'out_dim', 'simple_type': 'Dimname'}], + torch.Tensor.unflatten: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'sizes', 'simple_type': 'SymIntArrayRef'}], + torch.Tensor.unflatten: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'sizes', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'names', 'simple_type': 'DimnameList'}], + torch.Tensor.fill_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}], + torch.Tensor.fill_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Tensor'}], + torch.Tensor.floor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.floor_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + 
torch.Tensor.floor_divide: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.floor_divide: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.floor_divide_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.floor_divide_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.frac: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.frac_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.gcd: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.gcd_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.lcm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.lcm_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.index_copy_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'Tensor'}], + torch.Tensor.index_copy_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, 
{'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'Tensor'}], + torch.Tensor.index_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'Tensor'}], + torch.Tensor.index_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'Tensor'}], + torch.Tensor.index_put_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'c10::List<::std::optional>'}, {'is_kwarg_only': 'False', 'name': 'values', 'simple_type': 'Tensor'}], + torch.Tensor.index_put: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'c10::List<::std::optional>'}, {'is_kwarg_only': 'False', 'name': 'values', 'simple_type': 'Tensor'}], + torch.Tensor.isclose: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.isnan: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.is_distributed: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.is_floating_point: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.is_complex: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.is_conj: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor._is_zerotensor: [{'is_kwarg_only': 'False', 
'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.is_neg: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.isreal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.is_nonzero: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.is_same_size: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.is_signed: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.is_inference: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.kron: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.kthvalue: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'k', 'simple_type': 'int64_t'}], + torch.Tensor.kthvalue: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'k', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch.Tensor.nan_to_num: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.nan_to_num_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.ldexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.ldexp_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.log: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.log_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.log10: 
[{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.log10_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.log1p: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.log1p_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.log2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.log2_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.logaddexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.logaddexp2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.xlogy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.xlogy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.xlogy_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.xlogy_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.log_softmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch.Tensor.log_softmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch.Tensor.logcumsumexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 
'dim', 'simple_type': 'int64_t'}], + torch.Tensor.logcumsumexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch.Tensor.logsumexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef', 'size': 1}], + torch.Tensor.logsumexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}], + torch.Tensor.matmul: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.matrix_power: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'int64_t'}], + torch.Tensor.matrix_exp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.aminmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.max: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch.Tensor.max: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch.Tensor.max: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.max: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.amax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.mean: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.mean: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 
'IntArrayRef?', 'size': 1}], + torch.Tensor.mean: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}], + torch.Tensor.nanmean: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.median: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.median: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch.Tensor.median: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch.Tensor.nanmedian: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.nanmedian: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch.Tensor.nanmedian: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch.Tensor.min: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch.Tensor.min: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch.Tensor.min: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.min: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.amin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.mm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}], + torch.Tensor.mode: 
[{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.mode: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch.Tensor.mul: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.mul_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.multiply: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.multiply: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.multiply_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.multiply_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.mv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'vec', 'simple_type': 'Tensor'}], + torch.Tensor.mvlgamma: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'int64_t'}], + torch.Tensor.mvlgamma_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'int64_t'}], + torch.Tensor.narrow_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'length', 
'simple_type': 'SymInt'}], + torch.Tensor.narrow: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'length', 'simple_type': 'SymInt'}], + torch.Tensor.narrow: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'length', 'simple_type': 'SymInt'}], + torch.Tensor.permute: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dims', 'simple_type': 'IntArrayRef'}], + torch.Tensor.movedim: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'destination', 'simple_type': 'IntArrayRef'}], + torch.Tensor.movedim: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'destination', 'simple_type': 'int64_t'}], + torch.Tensor.moveaxis: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'destination', 'simple_type': 'IntArrayRef'}], + torch.Tensor.moveaxis: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'destination', 'simple_type': 'int64_t'}], + torch.Tensor.adjoint: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.is_pinned: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + 
torch.Tensor.pin_memory: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.pinverse: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.rad2deg: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.rad2deg_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.deg2rad: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.deg2rad_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.ravel: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.reciprocal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.reciprocal_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.neg: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.neg_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.negative: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.negative_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.repeat: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'repeats', 'simple_type': 'SymIntArrayRef'}], + torch.Tensor.repeat_interleave: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'repeats', 'simple_type': 'Tensor'}], + torch.Tensor.repeat_interleave: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'repeats', 'simple_type': 'SymInt'}], + torch.Tensor.reshape: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'shape', 'simple_type': 'SymIntArrayRef'}], + torch.Tensor.reshape_as: [{'is_kwarg_only': 'False', 
'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.round: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.round: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.round_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.round_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.relu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.relu_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.prelu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}], + torch.Tensor.hardshrink: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.rsqrt: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.rsqrt_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.select: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'int64_t'}], + torch.Tensor.select: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'SymInt'}], + torch.Tensor.sigmoid: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.sigmoid_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.logit: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.logit_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.sin: 
[{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.sin_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.sinc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.sinc_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.sinh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.sinh_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.detach: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.detach_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.slice_inverse: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}], + torch.Tensor.slice_scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}], + torch.Tensor.select_scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'SymInt'}], + torch.Tensor.diagonal_scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}], + torch.Tensor.as_strided_scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}], + torch.Tensor.smm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 
'mat2', 'simple_type': 'Tensor'}], + torch.Tensor.softmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch.Tensor.softmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch.Tensor.unsafe_split: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'split_size', 'simple_type': 'SymInt'}], + torch.Tensor.split: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'split_size', 'simple_type': 'SymInt'}], + torch.Tensor.split: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'split_size', 'simple_type': 'SymIntArrayRef'}], + torch.Tensor.unsafe_split_with_sizes: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'split_sizes', 'simple_type': 'SymIntArrayRef'}], + torch.Tensor.split_with_sizes: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'split_sizes', 'simple_type': 'SymIntArrayRef'}], + torch.Tensor.hsplit: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'sections', 'simple_type': 'int64_t'}], + torch.Tensor.hsplit: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'IntArrayRef'}], + torch.Tensor.vsplit: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'sections', 'simple_type': 'int64_t'}], + torch.Tensor.vsplit: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'IntArrayRef'}], + torch.Tensor.dsplit: [{'is_kwarg_only': 'False', 
'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'sections', 'simple_type': 'int64_t'}], + torch.Tensor.dsplit: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'IntArrayRef'}], + torch.Tensor.squeeze: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.squeeze: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch.Tensor.squeeze: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch.Tensor.squeeze: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef'}], + torch.Tensor.squeeze_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.squeeze_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch.Tensor.squeeze_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef'}], + torch.Tensor.squeeze_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch.Tensor.sspaddmm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}], + torch.Tensor.stft: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n_fft', 'simple_type': 'int64_t'}], + torch.Tensor.stft: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 
'n_fft', 'simple_type': 'int64_t'}], + torch.Tensor.istft: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n_fft', 'simple_type': 'int64_t'}], + torch.Tensor.sum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.sum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef?', 'size': 1}], + torch.Tensor.sum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}], + torch.Tensor.nansum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.sum_to_size: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}], + torch.Tensor.sqrt: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.sqrt_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.square: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.square_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.std: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.std: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef?', 'size': 1}], + torch.Tensor.std: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.std: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}], + torch.Tensor.std: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}], + 
torch.Tensor.prod: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.prod: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch.Tensor.prod: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch.Tensor.t: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.t_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.tan: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.tan_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.tanh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.tanh_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.tile: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dims', 'simple_type': 'SymIntArrayRef'}], + torch.Tensor.transpose: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim0', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dim1', 'simple_type': 'int64_t'}], + torch.Tensor.transpose: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim0', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'dim1', 'simple_type': 'Dimname'}], + torch.Tensor.transpose_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim0', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dim1', 'simple_type': 'int64_t'}], + torch.Tensor.flip: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dims', 'simple_type': 
'IntArrayRef'}], + torch.Tensor.fliplr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.flipud: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.roll: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'shifts', 'simple_type': 'SymIntArrayRef', 'size': 1}], + torch.Tensor.rot90: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor._nested_tensor_size: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor._nested_tensor_strides: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor._nested_tensor_storage_offsets: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.trunc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.trunc_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.fix: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.fix_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.type_as: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.unsqueeze: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch.Tensor.unsqueeze_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch.Tensor.var: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.var: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef?', 'size': 1}], + torch.Tensor.var: [{'is_kwarg_only': 'False', 
'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.var: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}], + torch.Tensor.var: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}], + torch.Tensor.view_as: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.where: [{'is_kwarg_only': 'False', 'name': 'condition', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.where: [{'is_kwarg_only': 'False', 'name': 'condition', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'Scalar?'}, {'is_kwarg_only': 'True', 'name': 'dtype', 'simple_type': 'ScalarType'}], + torch.Tensor.norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'Scalar?'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef', 'size': 1}, {'is_kwarg_only': 'False', 'name': 'keepdim', 'simple_type': 'bool'}, {'is_kwarg_only': 'True', 'name': 'dtype', 'simple_type': 'ScalarType'}], + torch.Tensor.norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'Scalar?'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef', 'size': 1}], + 
torch.Tensor.norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'Scalar?'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}, {'is_kwarg_only': 'False', 'name': 'keepdim', 'simple_type': 'bool'}, {'is_kwarg_only': 'True', 'name': 'dtype', 'simple_type': 'ScalarType'}], + torch.Tensor.norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'Scalar?'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}], + torch.Tensor.frexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.clone: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.positive: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.resize_as_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'the_template', 'simple_type': 'Tensor'}], + torch.Tensor.resize_as_sparse_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'the_template', 'simple_type': 'Tensor'}], + torch.Tensor.zero_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.sub: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.sub_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.subtract: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.subtract: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 
'simple_type': 'Scalar'}], + torch.Tensor.subtract_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.subtract_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.heaviside: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'values', 'simple_type': 'Tensor'}], + torch.Tensor.heaviside_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'values', 'simple_type': 'Tensor'}], + torch.Tensor.addmm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}], + torch.Tensor.addmm_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}], + torch.Tensor._addmm_activation: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}], + torch.Tensor.sparse_resize_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'sparse_dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dense_dim', 'simple_type': 'int64_t'}], + torch.Tensor.sparse_resize_and_clear_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'sparse_dim', 'simple_type': 'int64_t'}, 
{'is_kwarg_only': 'False', 'name': 'dense_dim', 'simple_type': 'int64_t'}], + torch.Tensor.sparse_mask: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mask', 'simple_type': 'Tensor'}], + torch.Tensor._sparse_mask_projection: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mask', 'simple_type': 'Tensor'}], + torch.Tensor.to_dense: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor._to_dense: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.sparse_dim: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor._dimI: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.dense_dim: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor._dimV: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor._nnz: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.coalesce: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.is_coalesced: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor._indices: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor._values: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor._coalesced_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'coalesced', 'simple_type': 'bool'}], + torch.Tensor.indices: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.values: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.crow_indices: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.col_indices: [{'is_kwarg_only': 'False', 
'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.ccol_indices: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.row_indices: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.unbind: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.unbind: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch.Tensor.to_sparse: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'sparse_dim', 'simple_type': 'int64_t'}], + torch.Tensor.to_sparse: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor._to_sparse: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'sparse_dim', 'simple_type': 'int64_t'}], + torch.Tensor._to_sparse: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.to_sparse_csr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor._to_sparse_csr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.to_sparse_csc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor._to_sparse_csc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.to_sparse_bsr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'blocksize', 'simple_type': 'IntArrayRef', 'size': 2}], + torch.Tensor._to_sparse_bsr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'blocksize', 'simple_type': 'IntArrayRef', 'size': 2}], + torch.Tensor.to_sparse_bsc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'blocksize', 'simple_type': 
'IntArrayRef', 'size': 2}], + torch.Tensor._to_sparse_bsc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'blocksize', 'simple_type': 'IntArrayRef', 'size': 2}], + torch.Tensor.to_mkldnn: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.dequantize: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.q_scale: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.q_zero_point: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.q_per_channel_scales: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.q_per_channel_zero_points: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.q_per_channel_axis: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.int_repr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.qscheme: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor._autocast_to_reduced_precision: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'cuda_enabled', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'cpu_enabled', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'cuda_dtype', 'simple_type': 'ScalarType'}, {'is_kwarg_only': 'False', 'name': 'cpu_dtype', 'simple_type': 'ScalarType'}], + torch.Tensor._autocast_to_full_precision: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'cuda_enabled', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'cpu_enabled', 'simple_type': 'bool'}], + torch.Tensor.is_set_to: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tensor', 'simple_type': 'Tensor'}], + 
torch.Tensor.masked_fill_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mask', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}], + torch.Tensor.masked_fill_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mask', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Tensor'}], + torch.Tensor.masked_fill: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mask', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}], + torch.Tensor.masked_fill: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mask', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Tensor'}], + torch.Tensor.masked_scatter_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mask', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'Tensor'}], + torch.Tensor.masked_scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mask', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'Tensor'}], + torch.Tensor.view: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}], + torch.Tensor.view: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dtype', 'simple_type': 'ScalarType'}], + torch.Tensor.put_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 
'Tensor'}], + torch.Tensor.put: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'Tensor'}], + torch.Tensor.index_add_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'Tensor'}], + torch.Tensor.index_add: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'Tensor'}], + torch.Tensor.index_add: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'Tensor'}], + torch.Tensor.index_reduce_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'reduce', 'simple_type': 'c10::string_view'}], + torch.Tensor.index_reduce: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'reduce', 'simple_type': 'c10::string_view'}], + torch.Tensor.index_fill_: [{'is_kwarg_only': 
'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}], + torch.Tensor.index_fill_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Tensor'}], + torch.Tensor.index_fill_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}], + torch.Tensor.index_fill_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Tensor'}], + torch.Tensor.index_fill: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}], + torch.Tensor.index_fill: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Tensor'}], + torch.Tensor.index_fill: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 
'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}], + torch.Tensor.index_fill: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Tensor'}], + torch.Tensor.scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}], + torch.Tensor.scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}], + torch.Tensor.scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'reduce', 'simple_type': 'c10::string_view'}], + torch.Tensor.scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'True', 'name': 'reduce', 'simple_type': 'c10::string_view'}], + torch.Tensor.scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 
'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}], + torch.Tensor.scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}], + torch.Tensor.scatter_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}], + torch.Tensor.scatter_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}], + torch.Tensor.scatter_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}], + torch.Tensor.scatter_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}], + torch.Tensor.scatter_add: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}], + torch.Tensor.scatter_add: [{'is_kwarg_only': 'False', 'name': 'self', 
'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}], + torch.Tensor.scatter_add_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}], + torch.Tensor.scatter_reduce: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'reduce', 'simple_type': 'c10::string_view'}], + torch.Tensor.scatter_reduce_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'reduce', 'simple_type': 'c10::string_view'}], + torch.Tensor.eq_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.eq_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.bitwise_and: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.bitwise_and: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + 
torch.Tensor.bitwise_and_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.bitwise_and_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.__and__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.__and__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.__iand__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.__iand__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.bitwise_or: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.bitwise_or: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.bitwise_or_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.bitwise_or_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.__or__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.__or__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 
'Tensor'}], + torch.Tensor.__ior__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.__ior__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.bitwise_xor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.bitwise_xor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.bitwise_xor_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.bitwise_xor_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.__xor__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.__xor__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.__ixor__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.__ixor__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.__lshift__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.__lshift__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 
'simple_type': 'Tensor'}], + torch.Tensor.__ilshift__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.__ilshift__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.bitwise_left_shift: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.bitwise_left_shift: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.bitwise_left_shift_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.bitwise_left_shift_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.__rshift__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.__rshift__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.__irshift__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.__irshift__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.bitwise_right_shift: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.bitwise_right_shift: [{'is_kwarg_only': 'False', 'name': 
'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.bitwise_right_shift_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.bitwise_right_shift_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.tril_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.triu_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.digamma_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.lerp_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Scalar'}], + torch.Tensor.lerp_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}], + torch.Tensor.addbmm_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch2', 'simple_type': 'Tensor'}], + torch.Tensor.addbmm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch2', 'simple_type': 'Tensor'}], + torch.Tensor.random_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'from', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'to', 'simple_type': 'int64_t?'}], + torch.Tensor.random_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 
'Tensor'}, {'is_kwarg_only': 'False', 'name': 'to', 'simple_type': 'int64_t'}], + torch.Tensor.random_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.uniform_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.cauchy_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.log_normal_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.exponential_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.geometric_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'double'}], + torch.Tensor.diag: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.cross: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.triu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.tril: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.trace: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.ne: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.ne: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.ne_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.ne_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.not_equal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, 
{'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.not_equal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.not_equal_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.not_equal_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.eq: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.eq: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.ge: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.ge: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.ge_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.ge_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.greater_equal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.greater_equal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.greater_equal_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 
'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.greater_equal_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.le: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.le: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.le_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.le_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.less_equal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.less_equal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.less_equal_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.less_equal_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.gt: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.gt: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.gt_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 
'simple_type': 'Scalar'}], + torch.Tensor.gt_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.greater: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.greater: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.greater_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.greater_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.lt: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.lt: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.lt_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.lt_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.less: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.less: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.less_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + 
torch.Tensor.less_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.take: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}], + torch.Tensor.take_along_dim: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'Tensor'}], + torch.Tensor.index_select: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}], + torch.Tensor.index_select: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}], + torch.Tensor.masked_select: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mask', 'simple_type': 'Tensor'}], + torch.Tensor.nonzero_static: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'size', 'simple_type': 'int64_t'}], + torch.Tensor.argwhere: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.gather: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}], + torch.Tensor.gather: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}], + torch.Tensor.addcmul: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 
'name': 'tensor1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tensor2', 'simple_type': 'Tensor'}], + torch.Tensor.addcmul_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tensor1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tensor2', 'simple_type': 'Tensor'}], + torch.Tensor.addcdiv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tensor1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tensor2', 'simple_type': 'Tensor'}], + torch.Tensor.addcdiv_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tensor1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tensor2', 'simple_type': 'Tensor'}], + torch.Tensor.triangular_solve: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}], + torch.Tensor.svd: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.swapaxes: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'axis0', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'axis1', 'simple_type': 'int64_t'}], + torch.Tensor.swapaxes_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'axis0', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'axis1', 'simple_type': 'int64_t'}], + torch.Tensor.swapdims: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim0', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dim1', 'simple_type': 'int64_t'}], + torch.Tensor.swapdims_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim0', 'simple_type': 'int64_t'}, {'is_kwarg_only': 
'False', 'name': 'dim1', 'simple_type': 'int64_t'}], + torch.Tensor.cholesky: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.cholesky_solve: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input2', 'simple_type': 'Tensor'}], + torch.Tensor.cholesky_inverse: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.qr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.geqrf: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.orgqr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input2', 'simple_type': 'Tensor'}], + torch.Tensor.ormqr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input2', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input3', 'simple_type': 'Tensor'}], + torch.Tensor.lu_solve: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'LU_data', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'LU_pivots', 'simple_type': 'Tensor'}], + torch.Tensor.multinomial: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'num_samples', 'simple_type': 'int64_t'}], + torch.Tensor.lgamma_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.lgamma: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.digamma: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.polygamma: [{'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.polygamma_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 
'name': 'n', 'simple_type': 'int64_t'}], + torch.Tensor.erfinv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.erfinv_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.i0: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.i0_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.sign: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.sign_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.signbit: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.dist: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.atan2_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.atan2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.arctan2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.arctan2_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.lerp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Scalar'}], + torch.Tensor.lerp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}], + torch.Tensor.histc: 
[{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.histogram: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bins', 'simple_type': 'Tensor'}], + torch.Tensor.histogram: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.fmod: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.fmod: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.fmod_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.fmod_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.hypot: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.hypot_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.igamma: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.igamma_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.igammac: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.igammac_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.nextafter: 
[{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.nextafter_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.remainder: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.remainder: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.remainder_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.remainder_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.fmin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.fmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.maximum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.minimum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.quantile: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'q', 'simple_type': 'Tensor'}], + torch.Tensor.quantile: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'q', 'simple_type': 'double'}], + torch.Tensor.nanquantile: 
[{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'q', 'simple_type': 'Tensor'}], + torch.Tensor.nanquantile: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'q', 'simple_type': 'double'}], + torch.Tensor.sort: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.sort: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'stable', 'simple_type': 'bool?'}], + torch.Tensor.sort: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch.Tensor.sort: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'stable', 'simple_type': 'bool?'}, {'is_kwarg_only': 'True', 'name': 'dim', 'simple_type': 'Dimname'}], + torch.Tensor.msort: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.argsort: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.argsort: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'stable', 'simple_type': 'bool'}], + torch.Tensor.argsort: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch.Tensor.topk: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'k', 'simple_type': 'SymInt'}], + torch.Tensor.renorm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'maxnorm', 'simple_type': 'Scalar'}], + torch.Tensor.renorm_: [{'is_kwarg_only': 'False', 'name': 'self', 
'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'maxnorm', 'simple_type': 'Scalar'}], + torch.Tensor.unfold: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dimension', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'step', 'simple_type': 'int64_t'}], + torch.Tensor.equal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.pow: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'Tensor'}], + torch.Tensor.pow: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'Scalar'}], + torch.Tensor.pow_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'Scalar'}], + torch.Tensor.pow_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'Tensor'}], + torch.Tensor.float_power: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'Tensor'}], + torch.Tensor.float_power: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'Scalar'}], + torch.Tensor.float_power_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'Scalar'}], + torch.Tensor.float_power_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 
'False', 'name': 'exponent', 'simple_type': 'Tensor'}], + torch.Tensor.normal_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.isfinite: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.isinf: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.record_stream: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 's', 'simple_type': 'Stream'}], + torch.Tensor.isposinf: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.isneginf: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.det: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.slogdet: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.logdet: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.inverse: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.inner: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.outer: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'vec2', 'simple_type': 'Tensor'}], + torch.Tensor.ger: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'vec2', 'simple_type': 'Tensor'}], + torch.Tensor.to_padded_tensor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'double'}], +} diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/hop_db.py b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/hop_db.py new file mode 100644 index 
0000000000000000000000000000000000000000..fa352cb5a3777e0b72746615d30eebe0dd08807b --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/hop_db.py @@ -0,0 +1,266 @@ +# mypy: ignore-errors + +import torch +import functools +from torch.testing import make_tensor +import unittest +from functorch.experimental.control_flow import map +from torch.testing._internal.opinfo.core import ( + OpInfo, + SampleInput, +) +from torch.testing._internal.common_dtype import all_types_and, custom_types +from torch.testing._internal.opinfo.core import DecorateInfo +from torch.nn.attention.flex_attention import flex_attention, _create_empty_block_mask + +def sample_inputs_map(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = functools.partial( + make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + yield SampleInput([make_arg(2, 2, 2, low=0.1, high=2), make_arg(2, 2, 2, low=0.1, high=2)], + args=(make_arg(1, low=0.1, high=2), make_arg(1, low=0.1, high=2))) + +def inner_f(x, y0, y1): + return [x[0].cos().add_(1.) * y0, (x[1] + y1.sin()).cos_().view(x[1].size())] + +def simple_map(xs, y0, y1): + def f(x, y0, y1): + return inner_f(x, y0, y1) + return map(f, xs, y0, y1) + +def nested_map(xs, y0, y1): + def f1(xx, y0, y1): + def f2(x, y0, y1): + return inner_f(x, y0, y1) + return map(f2, xx, y0, y1) + return map(f1, xs, y0, y1) + +def triple_nested_map(xs, y0, y1): + def f0(xs, y0, y1): + def f1(xx, y0, y1): + def f2(x, y0, y1): + return inner_f(x, y0, y1) + return map(f2, xx, y0, y1) + return map(f1, xs, y0, y1) + return map(f0, xs, y0, y1) + + +# Please consult with torch.export team before +# adding new entry to this list. 
+hop_that_doesnt_have_opinfo_test_allowlist = [ + "custom_function_call", + "autograd_function_apply", + "run_and_save_rng_state", + "run_with_rng_state", + "out_dtype", + "trace_wrapped", + "map", # T183144629 + "map_impl", + "with_effects", + "strict_mode", + "_export_tracepoint", + "call_torchbind", + "triton_kernel_wrapper_mutation", + "triton_kernel_wrapper_functional", + "hints_wrapper", +] + +torch.library.define( + "testlib::mutating_custom_op", + "(Tensor(a!) x, Tensor(b!) z) -> (Tensor, Tensor, Tensor)", + tags=torch.Tag.pt2_compliant_tag, +) + + +@torch.library.impl("testlib::mutating_custom_op", "cpu") +def foo_impl_cpu(x, z): + x.add_(5) + z.add_(5) + return x, z, x + z + + +@torch.library.impl("testlib::mutating_custom_op", "cuda") +def foo_impl_cuda(x, z): + x.add_(5) + z.add_(5) + return x, z, x + z + + +@torch.library.register_fake("testlib::mutating_custom_op") +def foo_impl_abstract(x, z): + return x, z, x + z + + +def sample_inputs_cond(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = functools.partial( + make_tensor, device=device, dtype=dtype, requires_grad=requires_grad + ) + yield SampleInput(make_arg(2, 2, 2, low=0.1, high=2)) + + +def simple_cond(x): + return torch.cond(x.sum() > 2, lambda x: (x.cos(),), lambda x: (x.sin(),), [x]) + + +def sample_inputs_auto_functionalize(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = functools.partial( + make_tensor, device=device, dtype=dtype, requires_grad=False + ) + yield SampleInput(make_arg(2, 2, 2, low=0.1, high=2), make_arg(2, 2, 2, low=0.1, high=2)) + + +def simple_auto_functionalize(x, z): + return torch.ops.testlib.mutating_custom_op(x, z) + + +def sample_inputs_flex_attention(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = functools.partial( + make_tensor, device=device, dtype=dtype, requires_grad=requires_grad + ) + + def score_mod(score, b, h, m, n): + return score + h + + q, k, v = (make_arg(2, 2, 128, 8, low=0.1, high=2) for _ in range(3)) + 
block_mask = _create_empty_block_mask(q, k) + yield SampleInput( + q, + k, + v, + score_mod, + block_mask + ) + +def sample_inputs_while_loop(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = functools.partial( + make_tensor, device=device, dtype=dtype, requires_grad=False + ) + yield SampleInput( + torch.tensor(3), + make_arg(2, 3, 4, low=0.1, high=2), + ) + +def simple_while_loop(iter_t, x): + def cond_fn(iter_t, x): + return iter_t > 0 + + def body_fn(iter_t, x): + return iter_t - 1, x.cos() + + return torch._higher_order_ops.while_loop(cond_fn, body_fn, (iter_t, x)) + + +hop_db = [ + OpInfo( + name="map", + variant_test_name="simple", + op=simple_map, + sample_inputs_func=sample_inputs_map, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + check_batched_grad=False, + check_batched_gradgrad=False, + check_batched_forward_grad=False, + check_inplace_batched_forward_grad=False, + ), + OpInfo( + name="map", + variant_test_name="nested", + op=nested_map, + sample_inputs_func=sample_inputs_map, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + check_batched_grad=False, + check_batched_gradgrad=False, + check_batched_forward_grad=False, + check_inplace_batched_forward_grad=False, + ), + OpInfo( + name="map", + variant_test_name="triple_nested", + op=triple_nested_map, + sample_inputs_func=sample_inputs_map, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + check_batched_grad=False, + check_batched_gradgrad=False, + check_batched_forward_grad=False, + check_inplace_batched_forward_grad=False, + ), + OpInfo( + name="cond", + variant_test_name="simple", + op=simple_cond, + sample_inputs_func=sample_inputs_cond, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + check_batched_grad=False, + check_batched_gradgrad=False, + check_batched_forward_grad=False, + check_inplace_batched_forward_grad=False, + supports_autograd=True, + # "torch.compile with aot_autograd does not 
currently support double backward." + supports_gradgrad=False, + ), + OpInfo( + name="while_loop", + variant_test_name="simple", + op=simple_while_loop, + sample_inputs_func=sample_inputs_while_loop, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + check_batched_grad=False, + check_batched_gradgrad=False, + check_batched_forward_grad=False, + check_inplace_batched_forward_grad=False, + supports_autograd=False, + ), + OpInfo( + name="auto_functionalize", + variant_test_name="simple", + op=simple_auto_functionalize, + sample_inputs_func=sample_inputs_auto_functionalize, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + check_batched_grad=False, + check_batched_gradgrad=False, + check_batched_forward_grad=False, + check_inplace_batched_forward_grad=False, + supports_autograd=False, + ), + OpInfo( + name="flex_attention", + variant_test_name="simple", + op=flex_attention, + sample_inputs_func=sample_inputs_flex_attention, + dtypes=custom_types(torch.float16, torch.float32), + supports_out=False, + check_batched_grad=False, + check_batched_gradgrad=False, + check_batched_forward_grad=False, + check_inplace_batched_forward_grad=False, + skips=( + DecorateInfo(unittest.expectedFailure, "TestHOP", "test_aot_export"), + DecorateInfo(unittest.expectedFailure, "TestHOP", "test_pre_dispatch_export"), + DecorateInfo(unittest.expectedFailure, "TestHOP", "test_serialize_export"), + DecorateInfo(unittest.expectedFailure, "TestHOP", "test_retrace_export"), + ), + ), + OpInfo( + name="flex_attention_backward", + variant_test_name="simple", + op=flex_attention, + sample_inputs_func=sample_inputs_flex_attention, + dtypes=custom_types(torch.float16, torch.float32), + supports_out=False, + check_batched_grad=False, + check_batched_gradgrad=False, + check_batched_forward_grad=False, + check_inplace_batched_forward_grad=False, + skips=( + DecorateInfo(unittest.expectedFailure, "TestHOP", "test_aot_export"), + 
DecorateInfo(unittest.expectedFailure, "TestHOP", "test_pre_dispatch_export"), + DecorateInfo(unittest.expectedFailure, "TestHOP", "test_serialize_export"), + DecorateInfo(unittest.expectedFailure, "TestHOP", "test_retrace_export"), + ), + ) +] diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/jit_metaprogramming_utils.py b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/jit_metaprogramming_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..02a9fcc5405e5148d1ee533473a613b1063879fa --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/jit_metaprogramming_utils.py @@ -0,0 +1,722 @@ +# mypy: ignore-errors + +# Torch +from torch.jit.annotations import BroadcastingList2, BroadcastingList3 # noqa: F401 +import torch.nn.functional as F +import torch +import torch.cuda +import torch.jit +import torch.jit._logging +import torch.jit.frontend +from torch.testing._internal.common_nn import module_tests, new_module_tests +from torch.testing._internal.common_utils import is_iterable_of_tensors, noncontiguous_like + +import collections +from copy import deepcopy +from typing import Any, Dict, List, Union +import math # noqa: F401 + +# Testing utils +from torch import inf + +assert torch.get_default_dtype() == torch.float32 + +L = 20 +M = 10 +S = 5 + + +def unpack_variables(args): + if isinstance(args, tuple): + return tuple(unpack_variables(elem) for elem in args) + else: + return args + +class dont_convert(tuple): + pass + +non_differentiable = collections.namedtuple('non_differentiable', ['tensor']) + +def create_input(call_args, requires_grad=True, non_contiguous=False, call_kwargs=None, dtype=torch.float, device=None): + if not isinstance(call_args, tuple): + call_args = (call_args,) + + def map_arg(arg): + def maybe_non_contig(tensor): + if not non_contiguous or tensor.numel() < 2: + return tensor.clone() + + return noncontiguous_like(tensor) + + def conjugate(tensor): + 
return tensor.conj() + + if isinstance(arg, (torch.Size, dont_convert)): + return arg + elif isinstance(arg, tuple) and len(arg) == 0: + var = conjugate(torch.randn((), dtype=dtype, device=device)) + var.requires_grad = requires_grad + return var + elif isinstance(arg, tuple) and not isinstance(arg[0], torch.Tensor): + return conjugate(maybe_non_contig(torch.randn(*arg, dtype=dtype, device=device))).requires_grad_(requires_grad) + # double check casting + elif isinstance(arg, non_differentiable): + if isinstance(arg.tensor, torch.Tensor): + return conjugate(maybe_non_contig(arg.tensor.to(device=device))) + return conjugate(maybe_non_contig(arg.tensor.to(device=device))) + elif isinstance(arg, torch.Tensor): + if arg.is_complex() != dtype.is_complex: + raise RuntimeError("User provided tensor is real for a test that runs with complex dtype, ", + "which is not supported for now") + # NOTE: We do clone() after detach() here because we need to be able to change size/storage of v afterwards + v = conjugate(maybe_non_contig(arg)).detach().to(device=device).clone() + v.requires_grad = requires_grad and (v.is_floating_point() or v.is_complex()) + return v + elif callable(arg): + return map_arg(arg(dtype=dtype, device=device)) + else: + return arg + args_out = tuple(map_arg(arg) for arg in call_args) + kwargs_out = {k: map_arg(v) for k, v in call_kwargs.items()} if call_kwargs else {} + return args_out, kwargs_out + +# NB: JIT script tests for all nn functional interfaces, script mode does +# not support in_place operations yet, so no inplace operation tests added. 
+# removed all the deprecated functions +# +# ( +# method name, +# input size/constructing fn, +# args (tuple represents shape of a tensor arg), +# test variant name(will be used at test name suffix, +# 'inplace' skips grad tests), // optional +# (True, nonfusible_nodes, fusible_nodes) for autodiff // optional +# fn to determine if test should be skipped, // optional +# fn mapping output to part that should be gradcheck'ed, // optional +# kwargs for function, // optional +# ) +nn_functional_tests = [ + ('conv1d', (S, S, S), ((S, S, S),)), + ('conv2d', (S, S, S, S), ((S, S, S, S),)), + ('conv3d', (S, S, S, S, S), ((S, S, S, S, S),)), + ('conv_transpose1d', (S, S, S), ((S, S, S),)), + ('conv_transpose2d', (S, S, S, S), ((S, S, S, S),)), + ('conv_transpose3d', (S, S, S, S, S), ((S, S, S, S, S),)), + ('conv_tbc', (S, S, S), ((S, S, S), (S,), 2)), + ('avg_pool1d', (S, S, S), (3,)), + ('avg_pool2d', (S, S, S, S), (3,), '', (True,)), + ('avg_pool3d', (S, S, S, S, S), (3,)), + ('fractional_max_pool2d', (S, S, S, S), (3, [2, 3],)), + ('max_pool1d', (S, S, S), (2, 1)), + ('max_pool1d', (S, S, S), (2, 1, 1, 1, False, True), 'with_indices'), + ('max_pool2d', (S, S, S, S), (2, 1), '', (True, 'aten::max_pool2d_with_indices')), + ('max_pool2d', (S, S, S, S), (2, 1, 1, 1, False, True), 'with_indices', (True, 'aten::max_pool2d_with_indices')), + ('max_pool3d', (S, S, S, S, S), (2, 1)), + ('max_unpool1d', torch.tensor([[[2., 4]]]), (torch.tensor([[[1, 3]]]), 2, 2, 0)), + ('max_unpool2d', torch.tensor([[[[2., 4]]]]), (torch.tensor([[[[1, 3]]]]), 2, 2, 0)), + ('max_unpool3d', torch.tensor([[[[[2., 4]]]]]), (torch.tensor([[[[[1, 3]]]]]), 2, 2, 0)), + ('lp_pool1d', (S, S, S), (2., 3, 2,)), + ('lp_pool2d', (S, S, S, S), (2., 3, 2,)), + ('lp_pool3d', (S, S, S, S, S), (2., 3, 2,)), + ('adaptive_max_pool1d', (S, S, S), (5,)), + ('adaptive_max_pool2d', (S, S, S, S), ([5, 7],)), + ('adaptive_max_pool3d', (S, S, S, S, S), ([3, 2, 2],)), + ('adaptive_avg_pool1d', (S, S, S), (5,), '', (True,)), 
+ ('adaptive_avg_pool2d', (S, S, S, S), ([5, 7],), '', (True,)), + ('adaptive_avg_pool3d', (S, S, S, S, S), ([3, 2, 2],), '', (True,)), + ('dropout', (S, S, S), (0.5,), '', (True, 'aten::native_dropout')), + ('alpha_dropout', (S, S, S), (0.5,)), + ('dropout2d', (S, S, S), (0.5,)), + ('dropout2d', (S, S, S, S), (0.5,), 'batched'), + ('dropout3d', (S, S, S, S), (0.5,)), + ('dropout3d', (S, S, S, S, S), (0.5,), 'batched'), + ('feature_alpha_dropout', (S, S, S), (0.5,)), + ('threshold', (S, S, S), (0.1, 2.), '', (True,)), + ('threshold', (S, S, S), (0.1, 2., True), 'inplace'), + ('relu', (S, S, S), (), '', (True,)), + ('relu', (S, S, S), (), 'inplace'), + ('glu', (S - 1, S - 1, S - 1), (),), + ('hardtanh', (S, S, S), (-0.5, 0.5), '', (True,)), + ('hardtanh', (S, S, S), (-0.5, 0.5, True), 'inplace'), + ('relu6', (S, S, S), (), '', (True,)), + ('relu6', (S, S, S), (True), 'inplace'), + ('elu', (S, S, S), (0.9,),), + ('elu', (S, S, S), (0.9, True), 'inplace'), + ('selu', (S, S, S), (),), + ('selu', (S, S, S), (True), 'inplace'), + ('celu', (S, S, S), (0.9,),), + ('celu', (S, S, S), (0.9, True), 'inplace'), + ('leaky_relu', (S, S, S), (0.02,), '', (True,)), + ('leaky_relu', (S, S, S), (0.02,), 'inplace'), + ('rrelu', (S, S), (0.1, 0.3, False),), + ('rrelu', (S, S), (0.1, 0.3, False, True), 'inplace'), + ('hardshrink', (S, S, S), (0.4,), '', (True,)), + ('tanhshrink', (S, S, S), (),), + ('softsign', (S, S, S), (),), + ('softplus', (S, S, S), (), '', (True,)), + ('softmin', (S, S, S), (0,),), + ('softmax', (S, S, S), (0,), '', (True,)), + ('softmax', (S, S, S), (0, 3, torch.double), 'with_all_args', (True,)), + ('tanh', (S, S, S), (), '', (True,)), + ('sigmoid', (S, S, S), (), '', (True,)), + ('silu', (S, S, S), (), '', (True,)), + ('log_softmax', (S, S, S), (0,), '', (True,)), + ('linear', (S, S), ((M, S),), '', (True, ['aten::linear'])), + ('linear', (S, S), ((M, S), (M,)), 'addmm', (True, ['aten::linear'])), + ('bilinear', (S, S, S), ((S, S, M), torch.zeros(M, S, M),),), 
+ ('embedding', torch.tensor([[1, 2, 4, 5], [4, 3, 2, 5]]), (torch.rand(6, 3), ), '', (True,)), + ('embedding_bag', torch.tensor([1, 2, 4, 2]), (torch.rand(5, 3), torch.tensor([0, 4]),),), + ('batch_norm', (S, S), + (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), None, None, True, ), + 'training', (True, 'aten::_batch_norm_impl_index')), + ('batch_norm', (0, S, S, S), + (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), + non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), True, ), + 'size_zero', (True, 'aten::_batch_norm_impl_index')), + ('batch_norm', (0, S, S, S), + (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), + non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), True, ), + 'size_zero_inference', (True, 'aten::_batch_norm_impl_index')), + ('batch_norm', (S, S), + (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), + non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), True, ), + 'with_weight_and_bias_training', (True, 'aten::_batch_norm_impl_index')), + ('batch_norm', (S, S), (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), + None, non_differentiable(torch.ones(S)), True, ), + 'with_only_bias_training', (True, 'aten::_batch_norm_impl_index')), + ('batch_norm', (S, S), (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), + non_differentiable(torch.randn(S)), None, True, ), + 'with_only_weight_training', (True, 'aten::_batch_norm_impl_index')), + ('batch_norm', (S, S), (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), + None, None, False, ), + 'inference', (True, 'aten::_batch_norm_impl_index')), + ('batch_norm', (S, S), (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), + non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), False, ), + 'with_weight_and_bias_inference', (True, 'aten::_batch_norm_impl_index')), + 
('batch_norm', (S, S), (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), + None, non_differentiable(torch.ones(S)), False, ), + 'with_only_bias_inference', (True, 'aten::_batch_norm_impl_index')), + ('batch_norm', (S, S), (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), + non_differentiable(torch.randn(S)), None, False, ), + 'with_only_weight_inference', (True, 'aten::_batch_norm_impl_index')), + ('instance_norm', (S, S, S), (non_differentiable(torch.zeros(S)), non_differentiable(torch.ones(S))),), + ('layer_norm', (S, S, S, S), ([5],), '', + (False, ['aten::contiguous', 'aten::_batch_norm_impl_index'])), + ('layer_norm', (S, S, S, S), ([5], non_differentiable(torch.rand(S)),), 'with_only_weight', + (False, ['aten::contiguous', 'aten::_batch_norm_impl_index'])), + ('layer_norm', (S, S, S, S), ([5], None, non_differentiable(torch.rand(S)),), 'with_only_bias', + (False, ['aten::contiguous', 'aten::_batch_norm_impl_index'])), + ('layer_norm', (S, S, S, S), ([5], non_differentiable(torch.rand(S)), + non_differentiable(torch.rand(S))), 'with_weight_and_bias', + (False, ['aten::contiguous', 'aten::_batch_norm_impl_index', 'aten::addcmul'])), + ('group_norm', (S, S, S), (1, torch.rand(5),),), + ('local_response_norm', (S, S, S), (2, ),), + ('nll_loss', F.log_softmax(torch.randn(3, 5), dim=0), (torch.tensor([1, 0, 4]),), '',), + ('poisson_nll_loss', torch.rand(S, 2), (torch.rand(S, 2),),), + ('poisson_nll_loss', torch.rand(S, 2), (torch.rand(S, 2), True, True), 'full'), + ('kl_div', F.log_softmax(torch.randn(S, 10), 1), (F.softmax(torch.randn(S, 10), 1),),), + ('cross_entropy', (3, S), (torch.randint(S, (3,), dtype=torch.int64),),), + ('binary_cross_entropy_with_logits', (3,), (torch.empty(3).random_(2), ),), + ('smooth_l1_loss', (3, S), (non_differentiable(torch.rand(3, S)),),), + ('huber_loss', (3, S), (non_differentiable(torch.rand(3, S)),),), + ('l1_loss', (3, S), (non_differentiable(torch.rand(3, S)),),), + ('mse_loss', (3, 
S), (non_differentiable(torch.rand(3, S)),),), + ('smooth_l1_loss', (3, S), ((torch.rand(3, S)),), 'with_grad'), + ('huber_loss', (3, S), ((torch.rand(3, S)),), 'with_grad'), + ('l1_loss', (3, S), ((torch.rand(3, S)),), 'with_grad'), + ('mse_loss', (3, S), ((torch.rand(3, S)),), 'with_grad'), + ('margin_ranking_loss', (S,), ((S,), (S,)),), + ('hinge_embedding_loss', (3, S), (non_differentiable(torch.rand(3, S)),),), + ('soft_margin_loss', (3, S), (non_differentiable(torch.rand(3, S)),),), + ('multilabel_soft_margin_loss', (3, S), (non_differentiable(torch.rand(3, S)),),), + ('cosine_embedding_loss', (S, S), ((S, S), non_differentiable(torch.rand(S,))),), + ('pixel_shuffle', (1, 9, 4, 4), (3,),), + ('pixel_unshuffle', (1, 1, 12, 12), (3,),), + ('affine_grid', (S, 2, 3), (torch.Size([S, 1, 7, 7]),),), + ('pad', (3, 3, 4, 2), ([1, 1],),), + ('pairwise_distance', (S, S), ((S, S),),), + ('pdist', (S, S), (),), + ('cosine_similarity', (S, S), ((S, S),),), + ('triplet_margin_loss', (S, S), ((S, S), (S, S)),), + ('normalize', (S, S, S), (),), + ('unfold', (S, S, S, S), ([2, 3]),), + ('fold', (1, 3 * 2 * 2, 12), ([4, 5], [2, 2]),), + ('grid_sample', (S, S, S, S), (non_differentiable(torch.rand(S, S, S, 2)),),), + ('gumbel_softmax', (S, S), (2.,), '', (True, ['aten::softmax', 'aten::add', 'aten::div'], ['aten::neg'])), + ('gumbel_softmax', (S, S), (2., True,), 'hard', (True, ['aten::softmax', 'aten::add', 'aten::div'], ['aten::neg'])), + ('multilabel_margin_loss', torch.tensor([[0.2, -0.2, 0.07]]), (torch.tensor([[0, 0, 1]]),),), + ('multi_margin_loss', (S, S), (non_differentiable(torch.randint(S, (S, ), dtype=torch.int64)), + 1, 1., non_differentiable(torch.randn(S))),), + ('binary_cross_entropy', torch.randn(3, 2).sigmoid(), (non_differentiable(torch.rand(3, 2)), + non_differentiable(torch.randn(3, 2))),), + ('binary_cross_entropy', torch.randn(3, 2).sigmoid(), + (non_differentiable(torch.rand(3, 2)), + non_differentiable(torch.randn(3, 2)), None, None, 'mean'), 
'size_average'), + ('ctc_loss', torch.rand(S, S, S).log_softmax(2).detach().requires_grad_(), + (torch.randint(1, S, (S, S), dtype=torch.long), torch.full((S,), S, dtype=torch.long), + torch.randint(1, S, (S,), dtype=torch.long))), + ('upsample', torch.randn(S, S, M, M), (None, 2.), 'with_scale'), + ('upsample', torch.randn(S, S, M, M), (4,), 'with_size'), + ('interpolate', torch.zeros(3, 3).view(1, 1, 3, 3), (2,), 'nearest_4d'), + ('interpolate', torch.randn(S, S, M, M), (None, 2.), 'nearest_4d_with_scale'), + ('interpolate', torch.randn(S, S, M, M), (4,), 'nearest_4d_with_size'), + ('interpolate', torch.zeros(3, 3).view(1, 1, 3, 3), (2,), 'area_4d'), + ('interpolate', torch.randn(S, S, M, M), (None, 2.), 'area_4d_with_scale'), + ('interpolate', torch.randn(S, S, M, M), (4,), 'area_4d_with_size'), + ('interpolate', torch.zeros(3, 3).view(1, 1, 3, 3), (2,), 'bilinear_4d'), + ('interpolate', torch.randn(S, S, M, M), (None, 2.), 'bilinear_4d_with_scale'), + ('interpolate', torch.randn(S, S, M, M), (4,), 'bilinear_4d_with_size'), + ('interpolate', torch.zeros(3, 3).view(1, 1, 3, 3), (2,), 'bicubic_4d'), + ('interpolate', torch.randn(S, S, M, M), (None, 2.), 'bicubic_4d_with_scale'), + ('interpolate', torch.randn(S, S, M, M), (4,), 'bicubic_4d_with_size'), + ('interpolate', torch.zeros(3, 3).view(1, 3, 3), (2,), 'nearest_3d'), + ('interpolate', torch.randn(S, M, M), (None, 2.), 'nearest_3d_with_scale'), + ('interpolate', torch.randn(S, M, M), (4,), 'nearest_3d_with_size'), + ('interpolate', torch.zeros(3, 3).view(1, 3, 3), (2,), 'area_3d'), + ('interpolate', torch.randn(S, M, M), (None, 2.), 'area_3d_with_scale'), + ('interpolate', torch.randn(S, M, M), (4,), 'area_3d_with_size'), + ('interpolate', torch.zeros(3, 3).view(1, 3, 3), (2,), 'linear_3d'), + ('interpolate', torch.randn(S, M, M), (None, 2.), 'linear_3d_with_scale'), + ('interpolate', torch.randn(S, M, M), (4,), 'linear_3d_with_size'), + ('interpolate', torch.randn(S, M, M, M, M), (None, 2.), 
'nearest_5d_with_scale'), + ('interpolate', torch.randn(S, M, M, M, M), (4,), 'nearest_5d_with_size'), + ('interpolate', torch.zeros(3, 3, 3).view(1, 1, 3, 3, 3), (2,), 'area_5d'), + ('interpolate', torch.randn(S, M, M, M, M), (None, 2.), 'area_5d_with_scale'), + ('interpolate', torch.randn(S, M, M, M, M), (4,), 'area_5d_with_size'), + ('interpolate', torch.zeros(3, 3, 3).view(1, 1, 3, 3, 3), (2,), 'trilinear_5d'), + ('interpolate', torch.randn(S, M, M, M, M), (None, 2.), 'trilinear_5d_with_scale'), + ('interpolate', torch.randn(S, M, M, M, M), (4,), 'trilinear_5d_with_size'), + ('interpolate', torch.zeros(3, 3).view(1, 1, 3, 3), (2, None, 'nearest', None, False), + 'nearest_4d_not_recompute_scale_factor'), + ('interpolate', torch.randn(S, S, M, M), (4, None, 'nearest', None, False), + 'nearest_4d_with_size_not_recompute_scale_factor'), + ('interpolate', torch.randn(S, S, M, M), (None, 2., 'bilinear', None, False), + 'bilinear_4d_with_scale_not_recompute_scale_factor'), + ('interpolate', torch.randn(S, S, M, M), (4, None, 'bilinear', None, False), + 'bilinear_4d_with_size_not_recompute_scale_factor'), + ('interpolate', torch.randn(S, S, M, M), (None, 2., 'bicubic', None, False), + 'bicubic_4d_with_scale_not_recompute_scale_factor'), + ('interpolate', torch.randn(S, S, M, M), (4, None, 'bicubic', None, False), + 'bicubic_4d_with_size_not_recompute_scale_factor'), + ('interpolate', torch.randn(S, M, M), (None, 2., 'nearest', None, False), + 'nearest_3d_with_scale_not_recompute_scale_factor'), + ('interpolate', torch.randn(S, M, M), (4, None, 'nearest', None, False), + 'nearest_3d_with_size_not_recompute_scale_factor'), + ('interpolate', torch.randn(S, M, M), (None, 2., 'linear', None, False), + 'linear_3d_with_scale_not_recompute_scale_factor'), + ('interpolate', torch.randn(S, M, M), (4, None, 'linear', None, False), + 'linear_3d_with_size_not_recompute_scale_factor'), + ('interpolate', torch.randn(S, M, M, M, M), (None, 2., 'nearest', None, False), + 
'nearest_5d_with_scale_not_recompute_scale_factor'), + ('interpolate', torch.randn(S, M, M, M, M), (4, None, 'nearest', None, False), + 'nearest_5d_with_size_not_recompute_scale_factor'), + ('interpolate', torch.randn(S, M, M, M, M), (None, 2., 'trilinear', None, False), + 'trilinear_5d_with_scale_not_recompute_scale_factor'), + ('interpolate', torch.randn(S, M, M, M, M), (4, None, 'trilinear', None, False), + 'trilinear_5d_with_size_not_recompute_scale_factor'), +] + +script_template = ''' +def the_method({}): + return {} +''' + +def value_to_literal(value): + if isinstance(value, str): + # Quotes string and escapes special characters + return ascii(value) + if isinstance(value, torch.Tensor): + return 'torch.' + str(value) + else: + return str(value) + +def get_call(method_name, func_type, args, kwargs): + kwargs_str = ', '.join([k + '=' + value_to_literal(v) for k, v in kwargs.items()]) + self_arg = args[0] + if func_type == 'method': + args = args[1:] + + argument_str = ', '.join(args) + argument_str += ', ' if len(args) and len(kwargs) else '' + argument_str += kwargs_str + + if func_type == 'functional' or func_type == 'function': + call = f'torch.{method_name}({argument_str})' + elif func_type == 'method': + call = f'{self_arg}.{method_name}({argument_str})' + elif func_type == 'nn_functional': + call = f'torch.nn.functional.{method_name}({argument_str})' + else: + raise TypeError('Unsupported function type') + + return call + +def get_constant(x): + if x == inf: + return 'math.inf' + if x == -inf: + return '-math.inf' + return x + +def get_script_args(args): + formals: List[str] = [] + tensors: List[Union[torch.Tensor, List[torch.Tensor]]] = [] + actuals: List[str] = [] + for arg in args: + if isinstance(arg, torch.Tensor): + name = f'i{len(formals)}' + formals.append(name) + actuals.append(name) + tensors.append(arg) + elif is_iterable_of_tensors(arg): + name = f'i{len(formals)}' + formals.append(name + ': List[torch.Tensor]') + actuals.append(name) + 
tensors.append(list(arg)) + elif isinstance(arg, str): + actuals.append(f"'{arg}'") + else: + actuals.append(str(get_constant(arg))) + return (formals, tensors, actuals) + +# create a script function from (name, func_type, output_process_fn), +# and returns the compiled function and example inputs +def gen_script_fn_and_args(method_name, func_type, *args, **kwargs): + formals, tensors, actuals = get_script_args(args) + call = get_call(method_name, func_type, actuals, kwargs) + script = script_template.format(', '.join(formals), call) + CU = torch.jit.CompilationUnit(script) + return CU.the_method, tensors + +# create a script function from (name, func_type), +# returns a function takes in (args, kwargs) and runs the compiled function +def create_script_fn(self, method_name, func_type): + # function returns tuple containing original output and + # filtered output to be used in checking gradients + def script_fn(*args, **kwargs): + fn, tensors = gen_script_fn_and_args(method_name, func_type, *args, **kwargs) + self.assertExportImport(fn.graph, tensors) + output = fn(*tensors) + # skip type annotate function attributes for now, see: https://github.com/python/mypy/issues/2087 + script_fn.last_graph = fn.graph_for(*tensors) # type: ignore[attr-defined] + return output + return script_fn + +class SplitInputs: + all_tensors: List[Any] + tensor_args: List[Any] + nontensor_args: List[Any] + arg_types: List[str] + tensor_kwargs: Dict[str, Any] + kwarg_order: List[str] + nontensor_kwargs: Dict[str, Any] + kwarg_types: Dict[str, Any] + + @staticmethod + def _is_tensor_input(arg): + return isinstance(arg, torch.Tensor) or is_iterable_of_tensors(arg) + + def __init__(self, args, kwargs): + self.arg_types = ['t' if self._is_tensor_input(arg) else 's' for arg in args] + self.kwarg_types = {k: 't' if self._is_tensor_input(v) else 's' for k, v in kwargs.items()} + self.tensor_args = [arg for arg in args if self._is_tensor_input(arg)] + self.nontensor_args = [arg for arg in args if 
not self._is_tensor_input(arg)] + self.tensor_kwargs = {k: v for k, v in kwargs.items() if self._is_tensor_input(v)} + self.nontensor_kwargs = {k: v for k, v in kwargs.items() if not self._is_tensor_input(v)} + self.all_tensors = [*self.tensor_args, *[v for k, v in self.tensor_kwargs.items()]] + self.kwarg_order = [k for k, v in kwargs.items()] + + def nontensors_match(self, other: 'SplitInputs'): + if self.arg_types != other.arg_types: + return False + if self.kwarg_types != other.kwarg_types: + return False + if self.kwarg_order != other.kwarg_order: + return False + if self.nontensor_args != other.nontensor_args: + return False + if self.nontensor_kwargs != other.nontensor_kwargs: + return False + return True + +# make a new function where all non-tensor arguments in 'args' have been partially +# applied, and all tensor arguments remain. +# used to trace functions when some arguments are not tensors +def partial_apply_nontensors(fn, args, kwargs): + inputs = SplitInputs(args, kwargs) + + def new_fn(*tensors_): + tensors = iter(tensors_) + full_args = [args[i] if s == 's' else next(tensors) for i, s in enumerate(inputs.arg_types)] + full_kwargs = {k: kwargs[k] if s == 's' else next(tensors) for k, s in inputs.kwarg_types.items()} + return fn(*full_args, **full_kwargs) + + return new_fn, inputs + +# create a trace function from input fn +def create_traced_fn(self, fn, cache_traced_fn=False): + def traced_fn(*inputs, **kwargs): + # `check_trace` is set to False because check_trace is run with @no_grad + # Also, `check_against_reference` already does all the checks + # against python function + fn_tensors, split_inputs = partial_apply_nontensors(fn, inputs, kwargs) + if not cache_traced_fn or not hasattr(traced_fn, 'traced'): + traced = torch.jit.trace(fn_tensors, split_inputs.all_tensors, check_trace=False) + self.assertExportImport(traced.graph, split_inputs.all_tensors) + output = traced(*split_inputs.all_tensors) + if cache_traced_fn: + traced_fn.traced = traced 
+ traced_fn.split_inputs = split_inputs + else: + # Guard to check that nontensor inputs are the same as during tracing + self.assertTrue(traced_fn.split_inputs.nontensors_match(split_inputs)) + output = traced_fn.traced(*split_inputs.all_tensors) + traced = traced_fn.traced + # skip type annotate function attributes for now, see: https://github.com/python/mypy/issues/2087 + traced_fn.last_graph = traced.graph_for(*split_inputs.all_tensors) # type: ignore[attr-defined] + traced_fn.graph = traced.graph # type: ignore[attr-defined] + return output + return traced_fn + +# known to be failing in script +EXCLUDE_SCRIPT = { + 'test_norm_fro_default', + 'test_norm_fro_cpu', + 'test_norm_nuc', + 'test_norm_fro', + 'test_norm_nuc_batched', + + # aten op has additional cudnn argument + 'test_nn_unfold', + + # flaky test - TODO fix + 'test_nn_ctc_loss', + + # unknown builtin op + 'test_nn_fold', + + # jit doesn't support sparse tensors. + 'test_to_sparse', + 'test_to_sparse_dim', +} + +# generates a script function and set of example inputs +# from a specified test in the format of nn_functional_tests +def get_nn_functional_compiled_fn_and_inputs(name, self_size, args, variant_name='', *extra_args): + test_name = 'test_nn_' + name + + if variant_name != '': + test_name = test_name + '_' + variant_name + + no_grad = variant_name == 'inplace' + + self_variable = create_input((self_size,))[0][0] + kwargs = None + + # need to record this because methods can change the size (e.g. 
unsqueeze) + args_variable, kwargs_variable = create_input(args) + + self_tensor = deepcopy(self_variable.data) + args_tensor = deepcopy(unpack_variables(args_variable)) + + f_args_variable = (self_variable,) + args_variable + f_args_tensor = (self_tensor,) + args_tensor + with torch._jit_internal._disable_emit_hooks(): + script_fn, inputs = gen_script_fn_and_args(name, "nn_functional", *f_args_variable) + return script_fn, inputs + + +# additional modules test +# TODO: delete this list once we make all nn_tests work +additional_module_tests = [ + { + 'module_name': 'Bilinear', + 'constructor_args': (S, S, M), + 'input_size': (S, S), + 'extra_args': ((S, S),) + }, + { + 'module_name': 'RNNCell', + 'constructor_args': (S, S), + 'input_size': (S, S), + }, + { + 'module_name': 'LSTMCell', + 'constructor_args': (S, S), + 'input_size': (S, S), + }, + { + 'module_name': 'GRUCell', + 'constructor_args': (S, S), + 'input_size': (S, S), + }, + { + 'module_name': 'MultiheadAttention', + 'constructor_args': (128, 8), + 'input_size': (10, 8, 128), + 'extra_args': (torch.randn(10, 8, 128), torch.randn(10, 8, 128)), + 'slowTest': True + }, + { + 'module_name': 'Transformer', + 'constructor_args': (1, 1, 1, 1, 2), + 'input_size': (3, 1, 1), + 'extra_args': (torch.randn(1, 1, 1),), + 'slowTest': True + } +] + +EXCLUDE_SCRIPT_MODULES = { + 'test_nn_AdaptiveAvgPool2d_tuple_none', + 'test_nn_AdaptiveAvgPool3d_tuple_none', + 'test_nn_AdaptiveMaxPool2d_tuple_none', + 'test_nn_AdaptiveMaxPool3d_tuple_none', + + # Doesn't use future division, so this is not supported + 'test_nn_CrossMapLRN2d', + # Derivative for aten::_scaled_dot_product_flash_attention_backward is not implemented + 'test_nn_TransformerDecoderLayer_gelu_activation', + 'test_nn_TransformerDecoderLayer_relu_activation', + 'test_nn_TransformerEncoderLayer_gelu_activation', + 'test_nn_TransformerEncoderLayer_relu_activation', + 'test_nn_Transformer_multilayer_coder', +} + +script_method_template = ''' +def forward({}): + 
return {} +''' + +def create_script_module(self, nn_module, constructor_args, *args, **kwargs): + def script_module(*args, **kwargs): + formals, tensors, actuals = get_script_args(args) + + method_args = ', '.join(['self'] + actuals) + call_args_str = ', '.join(actuals) + call = f"self.submodule({call_args_str})" + script = script_method_template.format(method_args, call) + + submodule_constants = [] + if kwargs.get('is_constant'): + submodule_constants = ['submodule'] + + # Create module to use the script method + class TheModule(torch.jit.ScriptModule): + __constants__ = submodule_constants + + def __init__(self) -> None: + super().__init__() + self.submodule = nn_module(*constructor_args) + + def make_module(script): + module = TheModule() + # check __repr__ + str(module) + module.define(script) + return module + + module = make_module(script) + if self: + self.assertExportImportModule(module, tensors) + module(*args) + # skip type annotate function attributes for now, see: https://github.com/python/mypy/issues/2087 + create_script_module.last_graph = module.graph # type: ignore[attr-defined] + return module + return script_module + +def check_alias_annotation(method_name, args, kwargs, *, aten_name, func_type='method'): + formals, tensors, actuals = get_script_args(args) + call = get_call(method_name, func_type, actuals, kwargs) + script = script_template.format(', '.join(formals), call) + CU = torch.jit.CompilationUnit(script) + # to clean up IR + torch._C._jit_pass_inline(CU.the_method.graph) + torch._C._jit_pass_constant_propagation(CU.the_method.graph) + torch._C._jit_check_alias_annotation(CU.the_method.graph, tuple(tensors), aten_name) + +def get_nn_module_name_from_kwargs(**kwargs): + if 'module_name' in kwargs: + return kwargs['module_name'] + elif 'fullname' in kwargs: + return kwargs['fullname'] + elif 'constructor' in kwargs: + return kwargs['constructor'].__name__ + +def get_nn_mod_test_name(**kwargs): + if 'fullname' in kwargs: + test_name = 
kwargs['fullname'] + else: + test_name = get_nn_module_name_from_kwargs(**kwargs) + if 'desc' in kwargs: + test_name = f"{test_name}_{kwargs['desc']}" + return f'test_nn_{test_name}' + +def get_nn_module_class_from_kwargs(**kwargs): + name = get_nn_module_name_from_kwargs(**kwargs) + index = name.find("_") + if index == -1: + return name + else: + return name[0:name.find("_")] + +def try_get_nn_module_compiled_mod_and_inputs(*args, **kwargs): + name = get_nn_module_name_from_kwargs(**kwargs) + + if 'desc' in kwargs and 'eval' in kwargs['desc']: + # eval() is not supported, so skip these tests + return + + test_name = name + if 'desc' in kwargs: + test_name = f"{test_name}_{kwargs['desc']}" + test_name = get_nn_mod_test_name(**kwargs) + + if test_name in EXCLUDE_SCRIPT_MODULES: + return + if 'constructor' in kwargs: + nn_module = kwargs['constructor'] + else: + nn_module = getattr(torch.nn, name) + + if "FunctionalModule" in str(nn_module): + return + + if 'constructor_args_fn' in kwargs: + constructor_args = kwargs['constructor_args_fn']() + else: + constructor_args = kwargs.get('constructor_args', ()) + + # Set up inputs from tuple of sizes or constructor fn + input_dtype = torch.double + if 'input_fn' in kwargs: + input = kwargs['input_fn']() + if isinstance(input, torch.Tensor): + input = (input,) + + if all(tensor.is_complex() for tensor in input): + input_dtype = torch.cdouble + else: + input = (kwargs['input_size'],) + + # Extra parameters to forward() + if 'extra_args' in kwargs: + input = input + kwargs['extra_args'] + + if 'target_size' in kwargs: + input = input + (kwargs['target_size'],) + elif 'target_fn' in kwargs: + if torch.is_tensor(input): + input = (input,) + input = input + (kwargs['target_fn'](),) + + args_variable, kwargs_variable = create_input(input, dtype=input_dtype) + f_args_variable = deepcopy(unpack_variables(args_variable)) + out_var = deepcopy(f_args_variable) + + args, mod = f_args_variable, create_script_module(None, nn_module, 
constructor_args, *f_args_variable)(*f_args_variable) + + return mod, out_var + + +def get_all_nn_module_tests(): + return module_tests + new_module_tests + additional_module_tests diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/jit_utils.py b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/jit_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..a8c7fa261f99823292d8132fcc549ee281d4fd1f --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/jit_utils.py @@ -0,0 +1,893 @@ +# mypy: ignore-errors + +# Torch +from torch.autograd import Variable +from torch.autograd.function import _nested_map +from torch.jit.annotations import BroadcastingList2, BroadcastingList3 # noqa: F401 + +from torch.onnx import OperatorExportTypes +import torch +import torch.cuda +import torch.jit +import torch.jit._logging +import torch.jit.frontend +import torch.jit.quantized +import zipfile +import functools + +# Testing utils +from torch.testing import FileCheck +from torch.testing._internal.common_utils import IS_WINDOWS, \ + freeze_rng_state, enable_profiling_mode_for_profiling_tests, ProfilingMode, TEST_BAILOUTS, \ + is_iterable_of_tensors +from torch.testing._internal.common_jit import JitCommonTestCase +from torch.testing._internal.common_utils import enable_profiling_mode # noqa: F401 + +# Standard library +from contextlib import contextmanager +from functools import reduce +from io import StringIO +from collections import defaultdict + +import importlib.util +import inspect +import io +import math +import os +import pickle +import sys +import tempfile +import textwrap +from importlib.abc import Loader +from typing import Any, Dict, List, Tuple, Union + +RUN_CUDA = torch.cuda.is_available() +RUN_CUDA_MULTI_GPU = RUN_CUDA and torch.cuda.device_count() > 1 +RUN_CUDA_HALF = RUN_CUDA +# HIP supports half, no version check necessary +if torch.cuda.is_available() and not 
torch.version.hip:
    # Disable half-precision CUDA testing on pre-Pascal devices
    # (compute capability < 6.0), which lack usable fp16 support.
    CUDA_VERSION = torch._C._cuda_getCompiledVersion()
    for d in range(torch.cuda.device_count()):
        major = torch.cuda.get_device_capability(d)[0]
        if (major < 6):
            RUN_CUDA_HALF = False

def execWrapper(code, glob, loc):
    # Thin wrapper around exec() so callers can supply globals/locals explicitly.
    exec(code, glob, loc)

def do_input_map(fn, input):
    # Apply `fn` to every tensor found in an arbitrarily nested input structure.
    return _nested_map(lambda t: isinstance(t, torch.Tensor), fn)(input)

def clear_class_registry():
    # Reset all JIT-global class/type state so script classes registered by one
    # test do not leak into the next.
    torch._C._jit_clear_class_registry()
    torch.jit._recursive.concrete_type_store = torch.jit._recursive.ConcreteTypeStore()
    torch.jit._state._clear_class_state()

def get_execution_plan(graph_executor_state):
    """Return the single execution plan of a graph executor state.

    Raises RuntimeError when the executor has more (or fewer) than one plan,
    since the calling tests assume exactly one.
    """
    execution_plans = list(graph_executor_state.execution_plans.values())
    num_plans = len(execution_plans)
    if num_plans != 1:
        raise RuntimeError('This test assumes this GraphExecutor should '
                           f'only have one execution plan, got: {num_plans}')
    return execution_plans[0]

class _AssertRaisesRegexWithHighlightContext:
    """
    A context manager that is useful for checking that error messages highlight
    the correct part of the source code.
+ """ + + def __init__(self, test_case, exception, regex, highlight): + self.test_case = test_case + self.exception_type = exception + self.regex = regex + self.highlight = highlight + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + with self.test_case.assertRaisesRegex(self.exception_type, self.regex): + if type: + raise value + + if self.highlight: + FileCheck().check_source_highlighted(self.highlight).run(str(value)) + + return True + +FUSION_GROUP = "prim::TensorExprGroup" + +class JitTestCase(JitCommonTestCase): + _do_cuda_memory_leak_check = True + _restored_warnings = False + + class capture_stdout(list): + """ + Replace sys.stdout with a temporary StringIO + """ + def __enter__(self): + self.sys_stdout = sys.stdout + self.stringio = StringIO() + sys.stdout = self.stringio + return self + + def __exit__(self, *args): + self.append(str(self.stringio.getvalue())) + del self.stringio + sys.stdout = self.sys_stdout + + class capture_stderr(list): + """ + Replace sys.stderr with a temporary StringIO + """ + def __enter__(self): + self.sys_stderr = sys.stderr + self.stringio = StringIO() + sys.stderr = self.stringio + return self + + def __exit__(self, *args): + self.append(str(self.stringio.getvalue())) + del self.stringio + sys.stderr = self.sys_stderr + + def setHooks(self): + torch._C._jit_set_emit_hooks(self.emitModuleHook, self.emitFunctionHook) + + def clearHooks(self): + torch._C._jit_set_emit_hooks(None, None) + + def setUp(self): + super().setUp() + # unittest overrides all warning filters and forces all of them to show up + # after we install our own to silence those coming from inside PyTorch. + # This will ensure that our filter still takes precedence. 
+ if not JitTestCase._restored_warnings: + torch.jit.TracerWarning.ignore_lib_warnings() + JitTestCase._restored_warnings = True + self.setHooks() + + def tearDown(self): + super().tearDown() + # needs to be cleared because python might be unloaded before + # the callback gets destructed + self.clearHooks() + clear_class_registry() + + def assertAllFused(self, graph, except_for=()): + + # note this helper collects nodes on 'fast path' only + # i.e. the true blocks of specialized checks + def get_nodes_and_parents_recursively(block, kind, acc): + for node in block.nodes(): + if node.kind() == kind: + acc[block].append(node) + elif node.kind() == 'prim::DifferentiableGraph': + get_nodes_and_parents_recursively(node.g('Subgraph'), kind, acc) + elif node.kind() == 'prim::If' and (node.inputs().__next__().node().kind() == 'aten::all' or + node.inputs().__next__().node().kind() == 'prim::TypeCheck' or + node.inputs().__next__().node().kind() == 'prim::RequiresGradCheck'): + get_nodes_and_parents_recursively(node.blocks().__next__(), kind, acc) + else: + for inner_block in node.blocks(): + get_nodes_and_parents_recursively(inner_block, kind, acc) + + allowed_nodes = {'prim::Constant', FUSION_GROUP, 'prim::BailoutTemplate', + 'prim::TupleConstruct', 'prim::If', 'prim::TypeCheck', 'prim::RequiresGradCheck'} | set(except_for) + + fusion_groups : Dict[torch._C.Block, List[torch._C.Node]] = defaultdict(list) + get_nodes_and_parents_recursively(graph, FUSION_GROUP, fusion_groups) + self.assertTrue(len(fusion_groups) == 1, f'got {graph}') + (graph, fusion_nodes) = next(iter(fusion_groups.items())) + # the block contains one FUSION_GROUP and the rest of nodes are `allowed_nodes` + self.assertTrue(len(fusion_nodes) == 1, f'got {graph}') + self.assertTrue(all(node.kind() in allowed_nodes for node in graph.nodes()), + f'got {graph}') + + def _isHookExceptionOk(self, e): + se = str(e) + allowed = ("Could not export Python function", + "closures are not exportable") + for a in 
allowed: + if a in se: + return True + return False + + def _compared_saved_loaded(self, m): + def extract_files(buffer): + # crack open the zip format to get at the main module code + archive = zipfile.ZipFile(buffer) + # check that we have no duplicate names + self.assertEqual(len(set(archive.namelist())), len(archive.namelist())) + files = list(filter(lambda x: x.startswith('archive/code/'), archive.namelist())) + # unwrap all the code files into strings + code_files_str = filter(lambda x: x.endswith('.py'), files) + code_files_stream = (archive.open(f) for f in code_files_str) + code_files = ("".join([line.decode() for line in file]) for file in code_files_stream) + + # unpickled all the debug files + debug_files_str = filter(lambda f: f.endswith('.debug_pkl'), files) + debug_files_stream = (archive.open(f) for f in debug_files_str) + debug_files = (pickle.load(f) for f in debug_files_stream) + return code_files, debug_files + + # disable the hook while we parse code, otherwise we will re-enter the hook + with torch._jit_internal._disable_emit_hooks(): + try: + # short-circuit if this is an empty function or module + if len(m.code) == 0: + return + if isinstance(m, torch._C.ScriptModule): + if len(m._method_names()) == 0: + return + + # save the module to a buffer + buffer = io.BytesIO() + torch.jit.save(m, buffer) + # copy the data in the buffer so we can restore it later. This + # is because py2 and py3 have different semantics with zipfile + # and it's easier to just work with a fresh copy each time. 
+ buffer_copy = buffer.getvalue() + + code_files, debug_files = extract_files(buffer) + + except RuntimeError as e: + if not self._isHookExceptionOk(e): + raise + else: + return + + # import the model again (from a the copy we made of the original) + buffer2 = io.BytesIO(buffer_copy) + imported = torch.jit.load(buffer2) + + # save it again + saved_module_buffer_2 = io.BytesIO() + torch.jit.save(imported, saved_module_buffer_2) + + saved_module_buffer_2.seek(0) + code_files_2, debug_files_2 = extract_files(saved_module_buffer_2) + + for a, b in zip(code_files, code_files_2): + self.assertMultiLineEqual(a, b) + + if isinstance(m, torch._C.ScriptModule): + self.assertTrue(torch._C._ivalue_tags_match(m, imported._c)) + + + def emitFunctionHook(self, func): + # func has invalid names for export, skip the jitter check + if func.name == "" or "aten::" in func.name: + return + self._compared_saved_loaded(func) + + def emitModuleHook(self, module): + self._compared_saved_loaded(module) + + + def getExportImportCopyWithPacking(self, m, also_test_file=True, map_location=None): + buffer = io.BytesIO() + m.apply(lambda s: s._pack() if s._c._has_method('_pack') else None) + torch.jit.save(m, buffer) + m.apply(lambda s: s._unpack() if s._c._has_method('_unpack') else None) + buffer.seek(0) + imported = torch.jit.load(buffer, map_location=map_location) + imported.apply(lambda s: s._unpack() if s._c._has_method('_unpack') else None) + + if not also_test_file: + return imported + + # Ideally we would like to not have to manually delete the file, but NamedTemporaryFile + # opens the file, and it cannot be opened multiple times in Windows. 
To support Windows, + # close the file after creation and try to remove it manually + f = tempfile.NamedTemporaryFile(delete=False) + try: + f.close() + imported.save(f.name) + result = torch.jit.load(f.name, map_location=map_location) + finally: + os.unlink(f.name) + + result.apply(lambda s: s._unpack() if s._c._has_method('_unpack') else None) + return result + + def assertGraphContains(self, graph, kind, consider_subgraphs=False): + + if consider_subgraphs: + strgraph = str(graph) + count = strgraph.count(kind) - strgraph.count(f'with {kind}') + self.assertTrue(count > 0) + return + + def nodes(block): + out = [] + for node in block.nodes(): + if node.kind() == kind: + out.append(node) + for block in node.blocks(): + out += nodes(block) + return out + + out_nodes = nodes(graph) + self.assertTrue(len(out_nodes) > 0) + + def assertGraphContainsExactly(self, graph, kind, num_kind_nodes, consider_subgraphs=False): + def perform_assert(graph, kind, actual, expected, consider_subgraphs): + if actual == expected: + return + subgraph = 'including' if consider_subgraphs else 'excluding' + raise AssertionError( + f'{graph}\nError: graph contains {actual} {kind} nodes ({subgraph} subgraphs) but expected {expected}') + + if consider_subgraphs: + strgraph = str(graph) + count = strgraph.count(kind) - strgraph.count(f'with {kind}') + perform_assert(graph, kind, count, num_kind_nodes, + consider_subgraphs) + return + + def nodes(block): + out = [] + for node in block.nodes(): + if node.kind() == kind: + out.append(node) + for block in node.blocks(): + out += nodes(block) + return out + + out_nodes = nodes(graph) + perform_assert(graph, kind, len(out_nodes), num_kind_nodes, + consider_subgraphs) + + def assertExpectedONNXGraph(self, g, *args, **kwargs): + g = torch.onnx._optimize_trace(g, operator_export_type=OperatorExportTypes.ONNX) + self.assertExpectedGraph(g, *args, **kwargs) + + def assertExpectedGraph(self, trace, *args, **kwargs): + if isinstance(trace, torch._C.Graph): 
            graph = trace
        else:
            graph = trace.graph()

        torch._C._jit_pass_lint(graph)
        torch._C._jit_pass_dce(graph)
        torch._C._jit_pass_lint(graph)
        graph = torch._C._jit_pass_canonicalize(graph)
        torch._C._jit_pass_lint(graph)
        self.assertExpected(str(graph), *args, **kwargs)

    def run_pass(self, name, trace):
        # Run the JIT pass '_jit_pass_<name>' over a graph (or a trace's graph),
        # linting before and after; the resulting graph is written back into
        # the trace when one was given.
        if isinstance(trace, torch._C.Graph):
            graph = trace
            set_graph = False
        else:
            set_graph = True
            graph = trace.graph()

        torch._C._jit_pass_lint(graph)
        # Passes may return a new graph, a bool, or None; only a graph result
        # replaces the current one.
        result = getattr(torch._C, '_jit_pass_' + name)(graph)
        if result is not None and not isinstance(result, bool):
            graph = result
        torch._C._jit_pass_lint(graph)

        if set_graph:
            trace.set_graph(graph)
        return graph

    def get_frame_vars(self, frames_up):
        # Collect the locals and globals visible `frames_up` frames above this
        # call so string-compiled test scripts can resolve the caller's names.
        frame = inspect.currentframe()
        if not frame:
            raise RuntimeError("failed to inspect frame")
        i = 0
        # The +1 skips this function's own frame.
        while i < frames_up + 1:
            frame = frame.f_back
            if not frame:
                raise RuntimeError("failed to get frame")
            i += 1
        defined_vars: Dict[str, Any] = {}
        defined_vars.update(frame.f_locals)
        defined_vars.update(frame.f_globals)
        return defined_vars

    def assertRaisesRegexWithHighlight(self, exception, regex, highlight):
        # Like assertRaisesRegex, but also checks that `highlight` appears
        # source-highlighted in the error message.
        return _AssertRaisesRegexWithHighlightContext(self, exception, regex, highlight)

    def checkScriptRaisesRegex(self, script, inputs, exception, regex,
                               name=None, outputs=None, capture_output=False,
                               frames_up=1, profiling=ProfilingMode.PROFILING):
        """
        Checks that a given function will throw the correct exception,
        when executed with normal python, the string frontend, and the
        AST frontend.
Logic taken from `checkScript` (see comments there + for details) + """ + with enable_profiling_mode_for_profiling_tests(): + # Normal Python + with self.assertRaisesRegex(exception, regex): + if isinstance(script, str): + frame = self.get_frame_vars(frames_up) + the_locals: Dict[str, Any] = {} + execWrapper(script, glob=frame, loc=the_locals) + frame.update(the_locals) + + python_fn = frame[name] + else: + python_fn = script + + python_fn(*inputs) + + # String frontend + with self.assertRaisesRegex(exception, regex): + if isinstance(script, str): + cu = torch.jit.CompilationUnit(script, _frames_up=frames_up) + string_frontend = getattr(cu, name) + else: + source = textwrap.dedent(inspect.getsource(script)) + cu = torch.jit.CompilationUnit(source, _frames_up=frames_up) + string_frontend = getattr(cu, script.__name__) + + string_frontend(*inputs) + + # Python AST frontend + if not isinstance(script, str): + with self.assertRaisesRegex(exception, regex): + ge = torch.jit.script(python_fn) + ge(*inputs) + + def checkBailouts(self, model, inputs, expected): + state = model.get_debug_state() + plan = get_execution_plan(state) + num_bailouts = plan.code.num_bailouts() + for i in range(0, num_bailouts): + plan.code.request_bailout(i) + bailout_outputs = model(*inputs) + self.assertEqual(bailout_outputs, expected) + + def checkScript(self, + script, + inputs, + name='func', + optimize=True, + inputs_requires_grad=False, + capture_output=False, + frames_up=1, + profiling=ProfilingMode.PROFILING, + atol=None, + rtol=None): + """ + Checks that a given script generates the same output as the Python + version using the given inputs. 
+ """ + with torch.jit.optimized_execution(optimize): + with enable_profiling_mode_for_profiling_tests(): + extra_profile_runs = any(isinstance(x, torch.Tensor) and x.requires_grad for x in inputs) + if isinstance(script, str): + # Compile the string to a Script function + # with enable_profiling_mode(): + cu = torch.jit.CompilationUnit(script, _frames_up=frames_up) + + # Execute the Python function so we can run it later and get its + # outputs + + frame = self.get_frame_vars(frames_up) + the_locals: Dict[str, Any] = {} + execWrapper(script, glob=frame, loc=the_locals) + frame.update(the_locals) + + python_fn = frame[name] + scripted_fn = getattr(cu, name) + else: + + # Check the string frontend first + source = textwrap.dedent(inspect.getsource(script)) + self.checkScript( + source, + inputs, + script.__name__, + optimize=optimize, + inputs_requires_grad=inputs_requires_grad, + capture_output=capture_output, + profiling=profiling, + frames_up=2) + + # Continue checking the Python frontend + scripted_fn = torch.jit.script(script, _frames_up=1) + python_fn = script + + if inputs_requires_grad: + recording_inputs = do_input_map(lambda t: t.detach().requires_grad_(), inputs) + else: + recording_inputs = inputs + + if capture_output: + with self.capture_stdout() as script_stdout: + script_outputs = scripted_fn(*recording_inputs) + with self.capture_stdout() as opt_script_stdout: + opt_script_outputs = scripted_fn(*recording_inputs) + with self.capture_stdout() as _python_stdout: + python_outputs = python_fn(*inputs) + if not IS_WINDOWS: + self.assertExpected(script_stdout[0], subname='stdout') + self.assertEqual(python_outputs, opt_script_outputs, atol=atol, rtol=rtol) + else: + # profiling run + script_outputs = scripted_fn(*recording_inputs) + if inputs_requires_grad or extra_profile_runs: + opt_script_outputs = scripted_fn(*recording_inputs) + # optimized run + opt_script_outputs = scripted_fn(*recording_inputs) + if TEST_BAILOUTS: + self.checkBailouts(scripted_fn, 
inputs, opt_script_outputs) + python_outputs = python_fn(*inputs) + self.assertEqual(python_outputs, script_outputs, atol=atol, rtol=rtol) + self.assertEqual(script_outputs, opt_script_outputs, atol=atol, rtol=rtol) + return scripted_fn + + def checkTrace(self, func, reference_tensors, input_tensors=None, + drop=None, allow_unused=False, verbose=False, + inputs_require_grads=True, check_tolerance=1e-5, export_import=True, + _force_outplace=False, grad_atol=None, grad_rtol=None): + + # TODO: check gradients for parameters, not just inputs + def allSum(vs): + # drop allows us to remove some values from ever being used + # to test unused outputs + if drop is not None: + vs = vs[:-drop] + # we don't want all the grad for all the outputs to be the same + # so we multiply each by a constant + return sum(math.log(i + 2) * v.sum() for i, v in enumerate(vs) if v is not None) + if input_tensors is None: + input_tensors = reference_tensors + + def flatten_inputs(inputs): + def input_reduce(input, fn, acc): + if isinstance(input, torch.Tensor): + fn(input, acc) + elif isinstance(input, dict): + reduce(lambda acc, key: input_reduce(input[key], fn, acc), input, acc) + else: + reduce(lambda acc, val: input_reduce(val, fn, acc), input, acc) + return acc + return tuple(input_reduce(recording_inputs, lambda t, acc: acc.append(t), [])) + + nograd_inputs = reference_tensors + if inputs_require_grads: + recording_inputs = do_input_map(lambda t: t.clone().requires_grad_(), reference_tensors) + flattened_recording_inputs = flatten_inputs(recording_inputs) + else: + recording_inputs = reference_tensors + + # `check_trace` is set to False because check_trace is run with @no_grad + # Also, `checkTrace` already does all the checks + # against python function + ge = torch.jit.trace(func, input_tensors, check_tolerance=check_tolerance, + _force_outplace=_force_outplace, check_trace=False) + + if export_import: + ge = self.getExportImportCopy(ge) + + if verbose: + print(ge.graph) + + # test no 
gradients case + outputs = func(*nograd_inputs) + outputs_ge = ge(*nograd_inputs) + self.assertEqual(outputs, outputs_ge) + + # test gradients case + outputs = func(*recording_inputs) + if inputs_require_grads: + grads = torch.autograd.grad(allSum(outputs), flattened_recording_inputs, + allow_unused=allow_unused) + + outputs_ge = ge(*recording_inputs) + if inputs_require_grads: + grads_ge = torch.autograd.grad(allSum(outputs_ge), flattened_recording_inputs, + allow_unused=allow_unused) + self.assertEqual(outputs, outputs_ge) + if inputs_require_grads: + self.assertEqual(grads, grads_ge, atol=grad_atol, rtol=grad_rtol) + + # test the grad grad case + outputs = func(*recording_inputs) + l1 = allSum(outputs) + if inputs_require_grads: + grads = torch.autograd.grad(l1, flattened_recording_inputs, create_graph=True, + allow_unused=allow_unused) + if inputs_require_grads: + l2 = (allSum(grads) * l1) + grads2 = torch.autograd.grad(l2, flattened_recording_inputs, allow_unused=allow_unused) + + if inputs_require_grads: + recording_inputs = do_input_map(lambda t: Variable(t, requires_grad=True), reference_tensors) + flattened_recording_inputs = flatten_inputs(recording_inputs) + + outputs_ge = ge(*recording_inputs) + l1_ge = allSum(outputs_ge) + if inputs_require_grads: + grads_ge = torch.autograd.grad( + l1_ge, flattened_recording_inputs, create_graph=True, allow_unused=allow_unused) + + if inputs_require_grads: + l2_ge = (allSum(grads_ge) * l1_ge) + grads2_ge = torch.autograd.grad(l2_ge, flattened_recording_inputs, allow_unused=allow_unused) + + self.assertEqual(outputs, outputs_ge) + if inputs_require_grads: + self.assertEqual(grads, grads_ge, atol=grad_atol, rtol=grad_rtol) + for g2, g2_ge in zip(grads2, grads2_ge): + if g2 is None and g2_ge is None: + continue + self.assertEqual(g2, g2_ge, atol=8e-4, rtol=8e-4) + + return ge + + def checkModule(self, nn_module, args): + """ + Check that a nn.Module's results in Script mode match eager and that it + can be exported + """ 
        sm = torch.jit.script(nn_module)

        # Freeze RNG state so eager and scripted runs see identical randomness.
        with freeze_rng_state():
            eager_out = nn_module(*args)

        with freeze_rng_state():
            script_out = sm(*args)

        self.assertEqual(eager_out, script_out)
        self.assertExportImportModule(sm, args)

        return sm

class NoTracerWarnContextManager:
    # Temporarily silence tracer-state warnings; the previous setting is
    # restored on exit.
    def __enter__(self):
        self.prev = torch._C._jit_get_tracer_state_warn()
        torch._C._jit_set_tracer_state_warn(False)

    def __exit__(self, *args):
        torch._C._jit_set_tracer_state_warn(self.prev)

@contextmanager
def inline_everything_mode(should_inline):
    # Force the JIT "inline everything" mode inside the `with` block and
    # restore the previous mode afterwards, even on error.
    old = torch._C._jit_get_inline_everything_mode()
    torch._C._jit_set_inline_everything_mode(should_inline)
    try:
        yield
    finally:
        torch._C._jit_set_inline_everything_mode(old)

@contextmanager
def set_fusion_group_inlining(inlining):
    # Toggle fusion-group inlining for the duration of the `with` block.
    old = torch._C._debug_get_fusion_group_inlining()
    torch._C._debug_set_fusion_group_inlining(inlining)
    try:
        yield
    finally:
        torch._C._debug_set_fusion_group_inlining(old)

# note: not re-entrant, use unnested only
@contextmanager
def disable_autodiff_subgraph_inlining(enabled=True):
    # NOTE: exit unconditionally re-enables inlining (does not restore the
    # previous value), which is why this manager must not be nested.
    torch._C._debug_set_autodiff_subgraph_inlining(not enabled)
    try:
        yield
    finally:
        torch._C._debug_set_autodiff_subgraph_inlining(True)

def _inline_everything(fn):
    # Decorator: run `fn` with inline-everything mode enabled.
    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        with inline_everything_mode(True):
            fn(*args, **kwargs)
    return wrapper

# this exists for forward compatibility reasons temporarily.
# TODO(suo) remove
def _tmp_donotuse_dont_inline_everything(fn):
    # Decorator: run `fn` with inline-everything mode disabled.
    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        with inline_everything_mode(False):
            fn(*args, **kwargs)
    return wrapper

# make it easy to quickly define/trace a function for these tests
def _trace(*args, **kwargs):
    # Decorator factory: @_trace(example_inputs...) traces the decorated
    # function with the given example arguments.
    def wrapper(func):
        return torch.jit.trace(func, args, **kwargs)
    return wrapper


def enable_cpu_fuser(fn):
    """Decorator: run `fn` with the legacy and TE CPU fusers enabled.

    Fuser state is always restored, even when `fn` raises. The wrapped
    function's return value is propagated (it was previously discarded),
    and functools.wraps keeps the test's name for discovery — both
    consistent with the no-op wrapper in enable_cpu_fuser_if.
    """
    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        torch._C._jit_override_can_fuse_on_cpu_legacy(True)
        torch._C._jit_override_can_fuse_on_cpu(True)
        torch._C._jit_set_te_must_use_llvm_cpu(False)
        try:
            return fn(*args, **kwargs)
        finally:
            torch._C._jit_override_can_fuse_on_cpu_legacy(False)
            torch._C._jit_override_can_fuse_on_cpu(False)
            torch._C._jit_set_te_must_use_llvm_cpu(True)
    return wrapper


def enable_cpu_fuser_if(cond):
    # Conditionally apply enable_cpu_fuser; otherwise decorate with a
    # transparent pass-through wrapper.
    if cond:
        return enable_cpu_fuser

    def noop_fuser(fn):
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            return fn(*args, **kwargs)
        return wrapper
    return noop_fuser

def get_forward(c):
    # Compiled 'forward' method object of a script class/module.
    return c._get_method('forward')

def get_forward_graph(c):
    # IR graph of the compiled 'forward' method.
    return c._get_method('forward').graph

def get_module_method(m, module, method):
    # Look up `method` on the named submodule of scripted module `m`.
    return m._c.getattr(module)._get_method(method)

def attrs_with_prefix(module, prefix):
    # Names of the module's C++-side attributes that start with `prefix`.
    return [x for x, _ in module._modules._c.items()
            if x.startswith(prefix)]

def warmup_backward(f, *args):
    # Run backward a few times so the profiling executor records gradient
    # profiles; returns the collected grads when explicit inputs were given.
    profiling_count = 3
    results = []
    for _ in range(profiling_count):
        if len(args) > 0:
            r = torch.autograd.grad(f, *args)
            results.append(r)
        else:
            f.backward(retain_graph=True)

    return results

# TODO: Remove me once https://bugs.python.org/issue42666 is resolved
def make_global(*args):
    # Install each object into its defining module's namespace so the JIT can
    # resolve it by qualified name.
    for arg in args:
        setattr(sys.modules[arg.__module__], arg.__name__, arg)

# Helper function to eval Python3 code without causing a syntax error for
# this file under py2
def _get_py3_code(code, fn_name):
    with tempfile.TemporaryDirectory() as tmp_dir:
        script_path = os.path.join(tmp_dir, 'script.py')
with open(script_path, 'w') as f: + f.write(code) + spec = importlib.util.spec_from_file_location(fn_name, script_path) + module = importlib.util.module_from_spec(spec) + loader = spec.loader + assert isinstance(loader, Loader) # Assert type to meet MyPy requirement + loader.exec_module(module) + fn = getattr(module, fn_name) + return fn + +class TensorExprTestOptions: + def __init__(self) -> None: + self.old_profiling_executor = torch._C._jit_set_profiling_executor(True) + self.old_profiling_mode = torch._C._get_graph_executor_optimize(True) + + self.old_cpu_fuser_state = torch._C._jit_can_fuse_on_cpu() + self.old_gpu_fuser_state = torch._C._jit_can_fuse_on_gpu() + torch._C._jit_override_can_fuse_on_cpu(True) + torch._C._jit_override_can_fuse_on_gpu(True) + self.texpr_fuser_state = torch._C._jit_texpr_fuser_enabled() + torch._C._jit_set_texpr_fuser_enabled(True) + self.old_fusion_inlining = torch._C._debug_get_fusion_group_inlining() + torch._C._debug_set_fusion_group_inlining(False) + self.old_te_must_use_llvm_cpu = torch._C._jit_get_te_must_use_llvm_cpu() + torch._C._jit_set_te_must_use_llvm_cpu(False) + + def restore(self): + torch._C._jit_set_profiling_executor(self.old_profiling_executor) + torch._C._get_graph_executor_optimize(self.old_profiling_mode) + + torch._C._jit_set_texpr_fuser_enabled(self.texpr_fuser_state) + torch._C._jit_override_can_fuse_on_gpu(self.old_gpu_fuser_state) + torch._C._jit_override_can_fuse_on_cpu(self.old_cpu_fuser_state) + torch._C._debug_set_fusion_group_inlining(self.old_fusion_inlining) + torch._C._jit_set_te_must_use_llvm_cpu(self.old_te_must_use_llvm_cpu) + +def clone_inputs(args): + inputs: List[Union[torch.Tensor, List[torch.Tensor]]] = [] + + for arg in args: + if isinstance(arg, torch.Tensor): + inputs.append(arg.detach().clone()) + elif is_iterable_of_tensors(arg): + inputs.append([t.detach().clone() for t in arg]) + else: + inputs.append(arg) + + return inputs + +def get_traced_sample_variant_pairs(device, dtype, op): + 
# tuples of (variant, sample) + outputs: List[Tuple[Any, Any]] = [] + + samples = op.sample_inputs(device, dtype) + + # Acquires variants to test + func = op.get_op() + method = op.get_method() + variants = { + # TODO: inplace tests currently fail, fix and add inplace variant + 'function': func, 'method': method, + } + + # TODO: find better way to standardize on op registration itself.. + has_fake_function = op.name in ["resize_", 'resize_as_'] + + if has_fake_function: + variants = {'method': getattr(torch.Tensor, op.name)} + + # In eager mode, these ops can take (Tensor, bool) args; but in + # JIT they can only take (Tensor, Scalar), and bool is not a + # scalar in the JIT type system. So to test these in JIT, the bool + # is converted to an int for the test. + ops_with_unsupported_bool_args = [ + { + "name": "div_floor_rounding", + "arg_idx": [0], + }, + { + "name": "div_no_rounding_mode", + "arg_idx": [0], + }, + { + "name": "div_trunc_rounding", + "arg_idx": [0], + }, + { + "name": "index_fill", + "arg_idx": [2], + }, + { + "name": "full_like", + "arg_idx": [0], + }, + { + "name": "mul", + "arg_idx": [0], + }, + { + "name": "new_full", + "arg_idx": [1], + }, + ] + + # doesn't support tracing + if has_fake_function: + return outputs + + for sample in samples: + for variant in variants.values(): + if variant is None: + continue + + if is_lambda(variant): + continue + + matching_ops = filter(lambda x: op.formatted_name == x["name"], ops_with_unsupported_bool_args) + for op_data in matching_ops: + for idx in op_data["arg_idx"]: + args = list(sample.args) + if len(sample.args) > idx and isinstance(sample.args[idx], bool): + args[idx] = int(args[idx]) + sample.args = tuple(args) + + outputs.append((variant, sample)) + + return outputs + +# types.LambdaType gave false positives +def is_lambda(lamb): + LAMBDA = lambda: 0 # noqa: E731 + return isinstance(lamb, type(LAMBDA)) and lamb.__name__ == LAMBDA.__name__ diff --git 
a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/logging_tensor.py b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/logging_tensor.py new file mode 100644 index 0000000000000000000000000000000000000000..8b7faf45b3c3ceeb9a9eea55c4db6b3aad05f651 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/logging_tensor.py @@ -0,0 +1,182 @@ +# mypy: ignore-errors + +import torch +from torch.utils._pytree import tree_map +from typing import Iterator, List, Optional +import logging +import contextlib +import itertools +from torch.utils._python_dispatch import TorchDispatchMode +from torch.utils.weak import WeakTensorKeyDictionary +import functools +from torch._C._profiler import gather_traceback, symbolize_tracebacks + +logger = logging.getLogger("LoggingTensor") + +_dtype_abbrs = { + torch.bfloat16: "bf16", + torch.float64: "f64", + torch.float32: "f32", + torch.float16: "f16", + torch.complex32: "c32", + torch.complex64: "c64", + torch.complex128: "c128", + torch.int8: "i8", + torch.int16: "i16", + torch.int32: "i32", + torch.int64: "i64", + torch.bool: "b8", + torch.uint8: "u8", +} + +# How the chain of calls works for LoggingTensor: +# 1. Call torch.sin +# 2. Attempt __torch_function__. In LoggingTensor torch function is disabled so we bypass it entirely +# 3. Enter dispatcher, wind your way through Autograd +# 4. Hit Python dispatch key, call __torch_dispatch__ + +# This Tensor can work with autograd in two ways: +# - The wrapped Tensor does not require gradients. In that case, the LoggingTensor +# can require gradients if the user asks for it as a constructor kwarg. +# - The wrapped Tensor can require gradients. In that case autograd will be tracked +# for the wrapped Tensor and the LoggingTensor itself cannot require gradients. +# WARNING: We allow these two possibilities for testing purposes. You should NEVER use both in a single +# test or you might get surprising behavior. 
+ +# TODO: TensorBase should work +class LoggingTensor(torch.Tensor): + elem: torch.Tensor + + __slots__ = ['elem'] + + context = contextlib.nullcontext + + @staticmethod + def __new__(cls, elem, *args, **kwargs): + # The wrapping tensor (LoggingTensor) shouldn't hold any + # memory for the class in question, but it should still + # advertise the same device as before + r = torch.Tensor._make_wrapper_subclass( # type: ignore[attr-defined] + cls, elem.size(), + strides=elem.stride(), storage_offset=elem.storage_offset(), + # TODO: clone storage aliasing + dtype=elem.dtype, layout=elem.layout, + device=elem.device, requires_grad=kwargs.get("requires_grad", False) + ) + # ...the real tensor is held as an element on the tensor. + r.elem = elem.detach() if r.requires_grad else elem + return r + + def __repr__(self): + return super().__repr__(tensor_contents=f"{self.elem}") + + @classmethod + def __torch_dispatch__(cls, func, types, args=(), kwargs=None): + def unwrap(e): + return e.elem if isinstance(e, cls) else e + + def wrap(e): + return cls(e) if isinstance(e, torch.Tensor) else e + + with cls.context(): + rs = tree_map(wrap, func(*tree_map(unwrap, args), **tree_map(unwrap, kwargs))) + logging.getLogger("LoggingTensor").info(f"{func.__module__}.{func.__name__}", args, kwargs, rs) # noqa: G004 + return rs + +class LoggingTensorMode(TorchDispatchMode): + def __torch_dispatch__(self, func, types, args=(), kwargs=None): + if kwargs is None: + kwargs = {} + rs = func(*args, **kwargs) + logging.getLogger("LoggingTensor").info(f"{func.__module__}.{func.__name__}", args, kwargs, rs) # noqa: G004 + return rs + +class LoggingTensorReentrant(LoggingTensor): + context = torch.overrides.enable_reentrant_dispatch + +# https://stackoverflow.com/questions/36408496/python-logging-handler-to-append-to-list +class LoggingTensorHandler(logging.Handler): + def __init__( + self, log_list: List[str], use_shortid_for_all_tensors: bool, + with_type: bool, tracebacks_list: Optional[List]) -> 
None: + logging.Handler.__init__(self) + self.log_list = log_list + self.use_shortid_for_all_tensors = use_shortid_for_all_tensors + self.tracebacks_list = tracebacks_list + self.memo = WeakTensorKeyDictionary() + self.next_id = 0 + self.with_type = with_type + + def _shortid(self, t: torch.Tensor) -> int: + if t not in self.memo: + self.memo[t] = self.next_id + self.next_id += 1 + return self.memo[t] + + def _fmt(self, a: object, with_type: bool = False) -> str: + cond_cls = torch.Tensor if self.use_shortid_for_all_tensors else LoggingTensor + if isinstance(a, cond_cls): + maybe_type = "" + if with_type and self.with_type: + maybe_type = f": {_dtype_abbrs[a.dtype]}[{', '.join(map(str, a.shape))}]" + x = f"${self._shortid(a)}{maybe_type}" + return x + else: + return repr(a) + + def emit(self, record): + fmt_args = ", ".join( + itertools.chain( + (str(tree_map(self._fmt, a)) for a in record.args[0]), + (f"{k}={str(tree_map(self._fmt, v))}" for k, v in record.args[1].items()), + ) + ) + fmt_rets = tree_map(functools.partial(self._fmt, with_type=True), record.args[2]) + self.log_list.append(f'{fmt_rets} = {record.msg}({fmt_args})') + if self.tracebacks_list is not None: + self.tracebacks_list.append(record.traceback) + +def log_input(name: str, var: object) -> None: + logger.info("input", (name,), {}, var) # noqa: PLE1205 + +class GatherTraceback(logging.Filter): + def __init__(self, python=True, script=True, cpp=False): + self.python = python + self.script = script + self.cpp = cpp + + def filter(self, record): + record.traceback = gather_traceback(python=self.python, script=self.script, cpp=self.cpp) + return True + +@contextlib.contextmanager +def capture_logs(is_mode=False, python_tb=False, script_tb=False, cpp_tb=False) -> Iterator[List[str]]: + collect_traceback = python_tb or script_tb or cpp_tb + log_list: List[str] = [] + tracebacks_list: List[str] = [] + handler = LoggingTensorHandler( + log_list, + with_type=True, + use_shortid_for_all_tensors=is_mode, + 
tracebacks_list=tracebacks_list if collect_traceback else None + ) + logger.addHandler(handler) + logger.setLevel(logging.INFO) + logger.propagate = False + if collect_traceback: + logger.addFilter(GatherTraceback(python=python_tb, script=script_tb, cpp=cpp_tb)) + try: + if collect_traceback: + yield log_list, tracebacks_list + else: + yield log_list + finally: + symbolized_tracebacks = symbolize_tracebacks(tracebacks_list) + tracebacks_list.clear() + tracebacks_list.extend(symbolized_tracebacks) + logger.removeHandler(handler) + +@contextlib.contextmanager +def capture_logs_with_logging_tensor_mode(python_tb=False, script_tb=False, cpp_tb=False): + with LoggingTensorMode(), capture_logs(True, python_tb, script_tb, cpp_tb) as logs: + yield logs diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/opinfo/__init__.py b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/opinfo/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..97c38f3560625213fbd59d09a9cfd22bad26ba04 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/opinfo/__init__.py @@ -0,0 +1,4 @@ +# mypy: ignore-errors + +import torch.testing._internal.opinfo.core +import torch.testing._internal.opinfo.definitions diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/opinfo/__pycache__/__init__.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/opinfo/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..231ce2235e75b6972933e559bbef31523ac6ba4a Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/opinfo/__pycache__/__init__.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/opinfo/__pycache__/core.cpython-310.pyc 
b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/opinfo/__pycache__/core.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8c556dd6e68a52190142ef75eea96608cd9929c0 Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/opinfo/__pycache__/core.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/opinfo/__pycache__/refs.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/opinfo/__pycache__/refs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..028b1e1871884af289258c44290fc5de987cecb4 Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/opinfo/__pycache__/refs.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/opinfo/core.py b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/opinfo/core.py new file mode 100644 index 0000000000000000000000000000000000000000..2aa38511d4e971b092ca19f74adf33f997ff4d7a --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/opinfo/core.py @@ -0,0 +1,2944 @@ +# mypy: ignore-errors + +import collections +import collections.abc +import math +import operator +import unittest +from dataclasses import asdict, dataclass +from enum import Enum +from functools import partial +from itertools import product +from typing import Any, Callable, Iterable, List, Optional, Tuple, Union + +import torch +from torch.testing import make_tensor +from torch.testing._internal.common_device_type import ( + skipCPUIfNoFFT, + tol, + toleranceOverride, +) +from torch.testing._internal.common_dtype import ( + _dispatch_dtypes, + floating_and_complex_types, + floating_and_complex_types_and, + floating_types, + get_all_dtypes, +) +from torch.testing._internal.common_utils import ( + is_iterable_of_tensors, + 
noncontiguous_like, + OPINFO_SAMPLE_INPUT_INDEX, + TEST_WITH_ROCM, + torch_to_numpy_dtype_dict, + TrackedInputIter, +) +from torch.testing._internal.opinfo import utils +from torchgen.utils import dataclass_repr + + +# Reasonable testing sizes for dimensions +L = 20 +M = 10 +S = 5 +XS = 3 + +# Unique value to distinguish default from anything else +_NOTHING = object() + + +# Extension of getattr to support qualified names +# e.g. _getattr_qual(torch, 'linalg.norm') -> torch.linalg.norm +def _getattr_qual(obj, name, default=_NOTHING): + try: + for path in name.split("."): + obj = getattr(obj, path) + return obj + except AttributeError: + if default is not _NOTHING: + return default + else: + raise + + +class DecorateInfo: + """Describes which test, or type of tests, should be wrapped in the given + decorators when testing an operator. Any test that matches all provided + arguments will be decorated. The decorators will only be applied if the + active_if argument is True.""" + + __slots__ = [ + "decorators", + "cls_name", + "test_name", + "device_type", + "dtypes", + "active_if", + ] + + def __init__( + self, + decorators, + cls_name=None, + test_name=None, + *, + device_type=None, + dtypes=None, + active_if=True, + ): + self.decorators = ( + list(decorators) + if isinstance(decorators, collections.abc.Sequence) + else [decorators] + ) + self.cls_name = cls_name + self.test_name = test_name + self.device_type = device_type + self.dtypes = dtypes + self.active_if = active_if + + # Validate dtypes + if self.dtypes is not None: + for dtype in self.dtypes: + assert isinstance(dtype, torch.dtype) + + def is_active(self, cls_name, test_name, device_type, dtype, param_kwargs): + return ( + self.active_if + and (self.cls_name is None or self.cls_name == cls_name) + and (self.test_name is None or self.test_name == test_name) + and (self.device_type is None or self.device_type == device_type) + and (self.dtypes is None or dtype in self.dtypes) + # Support callables over kwargs 
to determine if the decorator is active. + and ( + self.active_if(param_kwargs) + if isinstance(self.active_if, Callable) + else self.active_if + ) + ) + + +# FIXME +# Note: historically the 'input' kwarg had to be a Tensor or TensorList, but we are trying +# to support scalar inputs, too. Some tests still depend on 'input' being a Tensor +# or TensorList, however. +class SampleInput: + """Represents sample inputs to a function.""" + + __slots__ = [ + "input", + "args", + "kwargs", + "output_process_fn_grad", + "broadcasts_input", + "name", + ] + + def __init__( + self, + input, + *var_args, + args=None, + kwargs=None, + output_process_fn_grad=None, + broadcasts_input=None, + name=None, + **var_kwargs, + ): + # input is the first input to the op and is typically either a Tensor or TensorList (Sequence[Tensor]). + # This follows the typical pattern where for Tensor inputs op(t, ...) = t.op(...). + self.input = input + + # Allow calling either as SampleInput(input, args=args, kwargs=kwargs), or as + # SampleInput(input, *args, **kwargs) but not to mix the two forms + if args is not None or kwargs is not None: + assert ( + not var_args and not var_kwargs + ), """ +A SampleInput can be constructed "naturally" with *args and **kwargs or by +explicitly setting the "args" and "kwargs" parameters, but the two +methods of construction cannot be mixed!""" + elif len(var_args) or len(var_kwargs): + assert ( + output_process_fn_grad is None + and broadcasts_input is None + and name is None + ), """ +A SampleInput constructed "naturally" with *args and **kwargs +cannot specify additional metadata in keyword arguments""" + + self.args = args if args is not None else var_args + assert isinstance(self.args, tuple) + self.kwargs = kwargs if kwargs is not None else var_kwargs + assert isinstance(self.kwargs, dict) + + self.output_process_fn_grad = ( + output_process_fn_grad + if output_process_fn_grad is not None + else lambda x: x + ) + self.name = name if name is not None else "" 
+ + # Specifies if `self.input` is broadcasted or not, + # given that the operator supports broadcasting. + # This field is used to verify the behavior for inplace variant. + # + # If a SampleInput is marked with `broadcasts_input=True`, + # it is verified that we get a `RuntimeError` with this sample, + # and inplace variant. Also inplace grad{grad} tests are skipped, + # for such inputs (as they will error out otherwise). + self.broadcasts_input = ( + broadcasts_input if broadcasts_input is not None else False + ) + + def with_metadata( + self, *, output_process_fn_grad=None, broadcasts_input=None, name=None + ): + if output_process_fn_grad is not None: + self.output_process_fn_grad = output_process_fn_grad + if broadcasts_input is not None: + self.broadcasts_input = broadcasts_input + if name is not None: + self.name = name + return self + + def _repr_helper(self, formatter): + # Helper function to return the details of the SampleInput as `str` + # It consolidates all the fields of SampleInput and allows, + # formatting the fields like `input`, `args`, etc with `formatter` + # callable to customize the representation. + # Look at `summary` method for example. + arguments = [ + f"input={formatter(self.input)}", + f"args={formatter(self.args)}", + f"kwargs={formatter(self.kwargs)}", + f"broadcasts_input={self.broadcasts_input}", + f"name={repr(self.name)}", + ] + + return f'SampleInput({", ".join(a for a in arguments if a is not None)})' + + def __repr__(self): + return self._repr_helper(lambda x: x) + + def summary(self): + # Returns the SampleInput details in a more + # friendly format. + # It formats `Tensor` and `TensorList` + # in a more condensed representation. + def formatter(arg): + # Format any instance of `Tensor` (standalone, in list, or in dict) + # by Tensor[TensorShape] + # Eg. 
Tensor with shape (3, 4) is formatted as Tensor[3, 4] + if isinstance(arg, torch.Tensor): + shape = str(tuple(arg.shape)) + dtype = str(arg.dtype) + device = str(arg.device) + contiguity_suffix = "" + # NB: sparse CSR tensors annoyingly return is_sparse=False + is_sparse = arg.is_sparse or arg.layout == torch.sparse_csr + if not is_sparse and not arg.is_contiguous(): + contiguity_suffix = ", contiguous=False" + return f'Tensor[size={shape}, device="{device}", dtype={dtype}{contiguity_suffix}]' + elif isinstance(arg, dict): + return {k: formatter(v) for k, v in arg.items()} + elif is_iterable_of_tensors(arg): + return "TensorList[" + ", ".join(map(formatter, arg)) + "]" + elif isinstance(arg, (list, tuple)): # Handle list, tuple + return "(" + ",".join(map(formatter, arg)) + ")" + + return repr(arg) + + return self._repr_helper(formatter) + + # Applies the transform f(t) -> t to each tensor and dtype in the SampleInput + def transform(self, f): + def tt(t): + def _tt(t): + with torch.no_grad(): + return f(t) + + if isinstance(t, torch.Tensor): + return _tt(t) + elif isinstance(t, torch.dtype): + return _tt(t) + elif isinstance(t, list): + return list(map(tt, t)) + elif isinstance(t, tuple): + return tuple(map(tt, t)) + elif isinstance(t, dict): + return {k: tt(v) for k, v in t.items()} + else: + return t + + sample_tt_input, tt_args, tt_kwargs = ( + tt(self.input), + tt(self.args), + tt(self.kwargs), + ) + + # Note the transformed SampleInput assumes metadata like output_process_fn_grad is still valid! 
+ return SampleInput( + sample_tt_input, + args=tt_args, + kwargs=tt_kwargs, + output_process_fn_grad=self.output_process_fn_grad, + broadcasts_input=self.broadcasts_input, + name=self.name + "_transformed", + ) + + # Returns the NumPy version of the sample input object in the form of a tuple: (input, args, kwargs) + # Converts tensors to ndarrays by calling .detach().cpu().numpy() on them + # Converts dtypes by remapping them using torch_to_numpy_dtype_dict + def numpy(self): + def to_numpy(t): + if isinstance(t, torch.Tensor): + if t.dtype is torch.bfloat16: + return t.detach().cpu().to(torch.float32).numpy() + if t.dtype is torch.chalf: + return t.detach().cpu().to(torch.cfloat).numpy() + return t.detach().cpu().numpy() + elif isinstance(t, torch.dtype): + return torch_to_numpy_dtype_dict[t] + + return t + + return self.transform(to_numpy) + + def noncontiguous(self): + def to_noncontiguous(t): + if isinstance(t, torch.Tensor): + return noncontiguous_like(t) + elif isinstance(t, torch.dtype): + return t + + return t + + return self.transform(to_noncontiguous) + + +NumericsFilter = collections.namedtuple("NumericsFilter", ["condition", "safe_val"]) + + +class ErrorInput: + """ + A SampleInput that will cause the operation to throw an error plus information + about the resulting error. + """ + + __slots__ = ["sample_input", "error_type", "error_regex"] + + def __init__(self, sample_input, *, error_type=RuntimeError, error_regex): + self.sample_input = sample_input + self.error_type = error_type + self.error_regex = error_regex + + +class AliasInfo: + """Class holds alias information. 
For example, torch.abs -> + torch.absolute, torch.Tensor.absolute, torch.Tensor.absolute_ + """ + + def __init__(self, alias_name): + self.name = alias_name + self.op = _getattr_qual(torch, alias_name) + self.method_variant = getattr(torch.Tensor, alias_name, None) + self.inplace_variant = getattr(torch.Tensor, alias_name + "_", None) + + def __call__(self, *args, **kwargs): + return self.op(*args, **kwargs) + + +# Note [OpInfos] +# ~~~~~~~~~~~~~~ +# +# The majority of this note was written shortly after the PyTorch 1.9 release. +# If you notice it's out-of-date or think it could be improved then please +# file an issue. +# +# See also: the OpInfo tracker (https://github.com/pytorch/pytorch/issues/54261) +# See also: "Writing Test Templates" in common_device_type.py to learn how to +# parametrize a test template using OpInfos. +# See also: PyTorch's GitHub wiki on running and writing tests +# https://github.com/pytorch/pytorch/wiki/Running-and-writing-tests +# See also: ModuleInfos, OpInfo's sister class, defined in common_modules.py +# +# An OpInfo is a collection of metadata related to a PyTorch operator. This +# metadata is used to generate tests that validate properties of the operator, +# like if it implements the correct gradient formula. +# +# WHY OPINFOS? +# ~~~~~~~~~~~~ +# +# OpInfos are principally intended to do three things: +# +# 1) to allow systematic testing over all PyTorch's operators +# 2) to simplify operating testing by autogenerating many tests +# 3) to allow systems (like autograd, torchscript, fx, nnc...) to test +# against every PyTorch operator +# +# All these goals are still a work in progress. Not every operator has an +# OpInfo, and some operator tests that could be automatically generated +# still have to be written manually. +# +# It's helpful to understand that OpInfos are both about test simplification and +# modularity. 
PyTorch is a complicated framework with many interrelated systems, +# too many for any one person to keep track of. An OpInfo can be thought of as the +# interface between an operator implementer and those other systems. Instead of +# requiring the implementer of torch.foo understand how to test its forward +# mode AD or NNC support that's typically handled automatically just by +# defining an OpInfo. +# +# It's often surprising to OpInfo writers that just implementing an OpInfo +# typically can't verify an operator is actually implemented correctly: +# +# "If an OpInfo doesn't validate my op works as expected, what's the point +# of it?" +# +# But the point of is the above. OpInfos are intended to let you focus on testing +# the operator logic you're familiar with instead of having to write tests for +# how the operator interacts with each of PyTorch's many systems. +# +# And, OK, it turns out that SOMETIMES just writing an OpInfo DOES +# validate your op works as expected, but that's only in special +# cases. See below for details. +# +# WHAT'S AN OPINFO? +# ~~~~~~~~~~~~~~~~~ +# +# So what is an OpInfo? It's a Python class that describes an operator's properties, +# like which dtypes it supports on the CPU and whether it has any aliases. +# These properties can be divided into three categories: +# +# 1) Metadata describing the operator, like the operator's name and if it +# "supports" the out kwarg. +# 2) Test directives, like "skips" that tell the test suite to skip some +# tests. +# 3) A "sample inputs" function that generates valid inputs for the operator. +# +# OpInfo attributes are described in more detail below. +# +# THE SAMPLE INPUTS FUNCTION +# ~~~~~~~~~~~~~~~~~~~~~~~~~~ +# +# The "sample inputs" function merits special elaboration. This function is +# crucial to testing with OpInfos. A typical OpInfo test has to treat the operator +# as a black box. There's no structure for the test to understand or exploit. 
+# Without "sample inputs" it wouldn't even know how to call the OpInfo's +# operator. The sample input function saves the day by providing different +# "SampleInputs" that can be used to call the operator. A sample input +# function should have the following signature: +# +# def sample_inputs_foo(op_info, device, dtype, requires_grad, **kwargs): +# +# And should return an iterable of SampleInputs (see the class description +# above). Each SampleInput defines an "input", "args", "kwargs", an +# "output_process_fn_grad" function, the "broadcasts_input" bool and a +# "name". +# +# All the "sample_inputs" functions are invoked within a `torch.no_grad()` +# environment for efficiency and correctness. As such remember to set the +# "requires_grad" flag on the inputs **after** performing any transformations +# on them. +# +# The "input" is the first argument to the operator, or the tensor that +# the method or inplace variants of the operator should be called on, and +# should be on the requested device, of the requested dtype, and its +# requires_grad attribute should be set to the requires_grad argument. +# +# "args" should contain positional arguments, and "kwargs" keyword arguments. +# +# "output_process_fn_grad" has an interesting name. It's a function that maps +# the operator's output (when given the input, args, and kwargs) to the +# portion of the output to gradcheck. For example, consider an operator +# like torch.linalg.slogdet +# (https://pytorch.org/docs/main/generated/torch.linalg.slogdet.html). +# This operator returns a tuple of two tensors, but the first tensor +# cannot be backwarded through. Its "output_process_fn_grad" filters +# this output tuple to just the second argument, which we can call backward +# on. Functions that produce a single tensor can ignore this argument. +# +# "broadcasts_input" is a bool indicated if the SampleInput causes the operator +# to broadcast the "input" argument. 
This is important for tests to understand +# because inplace variants of operations throw a runtime error if they +# would broadcast their input arguments, so tests that work with inplace +# variants filter SampleInputs that broadcast their input. +# +# "name" is a string that's just used for debugging. It appears when printing +# the SampleInput. +# +# Sample inputs are designed to be used with many tests, some +# that are very time consuming, so they should be a small +# set with small tensors. An elaborated set of sample inputs +# can be specified using the "reference_inputs_func" attribute. +# The "reference inputs" for an operation are an extended +# set of sample inputs that can more exhaustively test an +# operator. They are used by only a few tests that are careful +# not to take too long to run. Adding reference inputs +# is highly encouraged! +# +# THE (OPTIONAL) ERROR INPUTS FUNCTION +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +# +# OpInfos may optionally specify "error inputs" through an error function. If +# specified test_errors in test_ops.py will call the op with these inputs +# and validate that the desired error is thrown. +# +# Error inputs automate a common testing pattern where multiple inputs are +# passed to an operation and the errors they throw are reviewed. Tests +# written in this style should be ported to the new OpInfo pattern. +# +# Error inputs are specified using the ErrorInputs class, which contains +# a SampleInput (see above) and data about the expected error. +# +# OPINFO FILE ORGANIZATION +# ~~~~~~~~~~~~~~~~~~~~~~~~ +# +# All OpInfos are currently defined in this file. Most OpInfo tests are defined +# in test_ops.py, but some system-specific tests are defined in those +# systems' test files, and subclass-specific tests are defined in the test +# file that corresponds to that subclass (see below). +# Expect a reorganization in the future. +# +# WHAT'S TESTED? 
+# ~~~~~~~~~~~~~~ +# +# Every OpInfo in the op_db sequence has the following properties validated in +# test_ops.py: +# +# - that its supported dtypes are specified correctly +# - that the operation produces the same results when called with noncontiguous inputs +# - that it supports the out= argument properly (if it allows out=), +# see https://github.com/pytorch/pytorch/wiki/Developer-FAQ#how-does-out-work-in-pytorch +# - that it works with the conjugate view bit properly +# - that its function, method, and inplace variants perform the same operation +# (that is, that torch.add, torch.Tensor.add, and torch.Tensor.add_ all +# do the same thing). +# - that its inplace variant preserves the input's storage +# - that its gradient formula is implemented correctly, and that it supports +# gradgrad and complex grad and gradgrad and forward mode AD properly for +# the op's function and inplace variants (method variants are skipped +# to reduce test time). +# - that the operation performs the same operation when traced or scripted +# using the jit +# - that the operation is autodifferentiated by the jit as expected +# - that the operator's aliases, if any, perform the same operation and that +# the jit understands the alias +# - that the operator throws the correct errors (if error_inputs is defined) +# - that the operator produces the same results as a NumPy reference (if ref is defined) +# - that the operator produces the same results as a NumPy reference on an extended +# set of "reference inputs" (if both ref and reference_inputs_func are defined) +# (NOTE: elementwise unary and elementwise binary OpInfos do this even if only +# ref is defined, because they effectively autogenerate reference inputs) +# - that the operator works on different CUDA devices +# +# Additional OpInfo tests are in test_jit_fuser_te.py, test_fx_experimental.py, +# and test_fx.py. These tests validate that operators work with NNC and FX +# as expected. 
#
# For performance, some of the above tests may only run on the first
# SampleInput returned by an OpInfo's sample input function.
#
# In addition to these tests, some subclasses (discussed in the next section)
# define additional tests.
#
# Critically, as mentioned above, what's not necessarily tested is that the operator
# works as expected. When implementing an OpInfo an engineer must still
# typically write one or more tests validating the operator's behavior.
# The exception to this is if reference testing is sufficient, or if
# the operation belongs to an OpInfo subclass that has more exhaustive
# operator testing. Elementwise unary and elementwise binary operators,
# in particular, usually don't require additional testing beyond
# writing an OpInfo.
#
#
# OPINFO (SUB)CLASSES
# ~~~~~~~~~~~~~~~~~~~
#
# In addition to the OpInfo base class there are several specialized OpInfo
# subclasses. For example, the UnaryUfuncInfo subclass is used for
# unary elementwise operations. These operations have a common structure
# that test_unary_ufuncs.py exploits with additional automated testing.
# The automated testing in test_unary_ufuncs.py is so thorough, comparing
# the operator to a NumPy reference function on a plethora of values, that
# just implementing an OpInfo for a unary elementwise operation is often
# sufficient testing.
#
# The ForeachFuncInfo is another OpInfo subclass that is hyper-specialized to a
# very unique class of operations. These OpInfos aren't included in the
# op_db sequence and have their own tests.
#
# Other OpInfo subclasses, like SpectralFuncInfo, are just for convenience
# when writing OpInfos.
#
# TESTING A NEW OPERATOR
# ~~~~~~~~~~~~~~~~~~~~~~
#
# If you're adding a new operator to any of the following namespaces:
# - torch
# - torch.fft
# - torch.linalg
# - torch.special
# - torch.nn.functional
# then you should typically add an OpInfo for it.
+# +# As mentioned a couple times above, implementing an OpInfo is not +# usually sufficient testing (unless the operator is a unary or binary elementwise +# operator). The OpInfo will only test the properties described in the +# "WHAT'S TESTED" section. It DOES NOT necessarily verify that the operator is +# implemented correctly. +# +# TIPS FOR WRITING AN OPINFO AND OPINFO TESTS +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +# +# Writing an OpInfo can be a little daunting. Since the point of an OpInfo is to +# be consumed by a variety of systems it can be hard to understand how to +# deal with test failures or how to set the OpInfo metadata properly. +# +# Before adding an OpInfo it helps to look at other OpInfos. A sample inputs +# function must be defined, and the operator's dtypes must be specified. +# Once that's done you should run the operator's tests in test_ops.py +# (these can be filtered using the "-k" argument in pytest). Tests that +# fail should provide an error message that describes what to change about +# your OpInfo. You don't need to worry about changing an OpInfo's default +# values unless a test yells at you. +# +# Similarly, if you're writing a test that consumes OpInfos then it's critical +# your test provides a clear error message describing what to do when it +# fails. You should not assume the OpInfo implementer is familiar with your +# system. +# +# If you see a confusing error message while developing an OpInfo then please +# file an issue describing what happened. +# +# This trial-and-error approach to writing an OpInfo can be frustrating, +# but it's probably necessary as long as OpInfos don't require +# learning about all the systems that consume them. One thing that can help +# is the get_supported_dtypes() function defined in utils.py. This +# function can be used to programmatically specify the dtypes an operator +# supports, and is especially useful if writing an OpInfo on a machine +# without a CUDA device. 
See its documentation for more details.
#
# THE FUTURE OF OPINFOS AND OPINFO TESTING
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# In the future we expect OpInfo coverage to improve and cover
# the great majority of PyTorch's (public) operators.
#


# Classes and methods for the operator database
@dataclass
class OpInfo:
    """Operator information and helper functions for acquiring it."""

    # the string name of the function
    name: str

    # An optional reference function that accepts ndarrays (AKA "NumPy arrays").
    # If given, the op will be compared with its reference on each of its sample inputs.
    ref: Optional[Callable] = None

    # the following metadata describes the operator, its variants, and its aliases, if any

    # iterable of aliases, e.g. ("absolute",) for torch.abs
    aliases: Iterable = None

    # additional string to include in the test name
    # this is useful when an op needs multiple OpInfos,
    # like divide does, often because it's really several
    # different ops behind the scenes
    variant_test_name: str = ""

    # the function variant of the operation, populated as torch.<name> if None
    op: Callable = None

    # allows the method variant of this operation to be specified as follows:
    # - if _NOTHING (default), then the OpInfo attempts to discover the variant using its name
    # - if None, then the OpInfo explicitly specifies it has no associated method
    # - if a Callable, then that callable should be the method associated with this operation
    method_variant: Callable = _NOTHING

    # allows the inplace variant of this operation to be specified as follows:
    # - if _NOTHING (default), then the OpInfo attempts to discover the variant using its name
    # - if None, then the OpInfo explicitly specifies it has no associated inplace variant
    # - if a Callable, then that callable should be the inplace variant associated with this operation
    inplace_variant: Callable = _NOTHING

    # allows the operator variant of this operation to be specified as follows:
    # - if _NOTHING (default), then the OpInfo attempts to discover the variant using its name
    # - if None, then the OpInfo explicitly specifies it has no associated operator
    # - if a Callable, then that callable should be the operator associated with this operation
    operator_variant: Callable = _NOTHING

    # allows the inplace operator variant of this operation to be specified as follows:
    # - if _NOTHING (default), then the OpInfo attempts to discover the variant using its name
    # - if None, then the OpInfo explicitly specifies it has no associated inplace operator
    # - if a Callable, then that callable should be the inplace operator associated with this operation
    inplace_operator_variant: Callable = _NOTHING

    # the following metadata are test directives for skipping or modifying tests

    # information about which tests to skip
    skips: Tuple = ()

    # decorators to apply to generated tests
    decorators: Tuple = ()

    # the following are pointers to functions to generate certain classes of inputs

    # function to generate sample inputs with strided layouts
    sample_inputs_func: Callable = None

    # function to generate a more thorough set of sample inputs with strided layouts
    reference_inputs_func: Callable = None

    # function to generate inputs that will throw errors
    error_inputs_func: Callable = None

    # function to generate sparse (coo, csr, csc, bsr, bsc) inputs that will throw errors
    error_inputs_sparse_func: Callable = None

    # function to generate sample inputs with sparse coo layouts
    sample_inputs_sparse_coo_func: Callable = None

    # function to generate sample inputs with sparse csr layouts
    sample_inputs_sparse_csr_func: Callable = None

    # function to generate sample inputs with sparse csc layouts
    sample_inputs_sparse_csc_func: Callable = None

    # function to generate sample inputs with sparse bsr layouts
    sample_inputs_sparse_bsr_func: Callable = None

    # function to generate sample inputs with sparse bsc layouts
    sample_inputs_sparse_bsc_func: Callable = None

    # the following metadata relates to dtype support and is tested for correctness in test_ops.py

    # dtypes this function works with on the CPU,
    # inherited by other device types that don't specify their own dtypes
    dtypes: _dispatch_dtypes = None

    # the following dtypesIf... options override the dtypes value on their respective device types

    # dtypes this function is expected to work with on CUDA
    dtypesIfCUDA: _dispatch_dtypes = None

    # dtypes this function is expected to work with on ROCM
    dtypesIfROCM: _dispatch_dtypes = None

    # dtypes this function is expected to work with on HPU
    dtypesIfHpu: _dispatch_dtypes = None

    # dtypes this function is expected to work with on XPU
    dtypesIfXPU: _dispatch_dtypes = None

    # backward dtypes this function is expected to work with
    backward_dtypes: _dispatch_dtypes = None

    # backward dtypes this function is expected to work with on CUDA
    backward_dtypesIfCUDA: _dispatch_dtypes = None

    # backward dtypes this function is expected to work with on ROCM
    backward_dtypesIfROCM: _dispatch_dtypes = None

    # backward dtypes this function is expected to work with on HPU
    backward_dtypesIfHpu: _dispatch_dtypes = None

    # the following metadata describes the operators out= support

    # whether the op supports the out kwarg
    # defaults to True, if the op does not allow the out kwarg or
    # supports it incorrectly then test_out in test_ops.py should fail
    supports_out: bool = True

    # the following metadata relates to autograd support
    # whether the operation supports backward mode AD
    # if true, gradient correctness is tested in test_ops.py
    # using the op's sample inputs
    supports_autograd: bool = True

    # whether the op supports second order gradients
    # if true, gradgrad correctness is tested in test_ops.py
    # defaults to support_autograd's value
    # TODO: rename this to supports_bwgrad_bwgrad to be consistent with below
    supports_gradgrad: bool = None

    # whether the op supports second order gradients via
    # forward-over-reverse. If True, forward-over-reverse gradgrad correctness
    # is tested. If False, test that forward grad is not implemented.
    # Defaults to False.
    supports_fwgrad_bwgrad: bool = False

    # whether the operation supports inplace autograd
    # if true, tested in test_ops.py
    # defaults to supports_autograd's value
    supports_inplace_autograd: bool = None

    # Whether the operation supports forward mode AD
    # If the value is True, we check that the gradients are correct
    # If the value is False, we test that forward grad is not implemented
    supports_forward_ad: bool = False

    # Whether the operation has a varargs variant
    # (e.g. functions like ones, zeros, methods like view, permute)
    supports_varargs: bool = False

    # Whether the forward operation avoids materializing COW tensor inputs
    supports_cow_input_no_materialize_forward: bool = True

    # Whether the backward operation avoids materializing COW tensor inputs
    supports_cow_input_no_materialize_backward: bool = True

    # Whether to skip the backward part of the COW tensor input test
    skip_cow_input_backward: bool = False

    # If `supports_cow_input_no_materialize_forward == True`, this list contains
    # the arg indices or kwarg names of inputs that are expected to materialize
    allow_cow_input_materialize_forward: List[Union[int, str]] = None

    # If `supports_cow_input_no_materialize_backward == True`, this list contains
    # the arg indices or kwarg names of inputs that are expected to materialize
    allow_cow_input_materialize_backward: List[Union[int, str]] = None

    # wrapper function for gradcheck
    gradcheck_wrapper: Callable = lambda op, *args, **kwargs: op(*args, **kwargs)

    # whether to check batched grad when doing gradcheck
    # defaults to support_autograd's value
    check_batched_grad: bool = None

    # whether to check batched grad grad when doing gradgradcheck
    # defaults to support_gradgrad's value
    check_batched_gradgrad: bool = None

    # whether to check batched forward grad when doing gradcheck
    # defaults to the value of `supports_forward_ad`
    check_batched_forward_grad: bool = None

    # whether to check batched forward grad when doing gradcheck
    # defaults to the value of `check_batched_forward_grad`
    check_inplace_batched_forward_grad: bool = None

    # tolerance for nondeterminism while performing gradcheck
    gradcheck_nondet_tol: float = 0.0

    # Whether to use the fast implementation for gradcheck/gradgradcheck.
    # When set to None, defers to the default value provided by the wrapper
    # function around gradcheck (testing._internal.common_utils.gradcheck)
    gradcheck_fast_mode: bool = None

    # the following metadata relates to JIT support and is tested for correctness in test_ops.py

    # name of the corresponding aten:: operator
    aten_name: str = None

    # if this is a composite implicit autograd op, the decomposed op
    decomp_aten_name: Optional[str] = None

    # name of the corresponding aten:: operator for backwards
    aten_backward_name: Optional[str] = None

    # if an op's aten::node is expected to be symbolically autodiffed
    assert_autodiffed: bool = False

    # a list of strings with node names that are expected to be in a
    # DifferentiableGraph when autodiffed. Ex: ['aten::add', 'aten::mm'],
    # default is populated to be ['aten::(name of Python operator)']
    autodiff_nonfusible_nodes: List[str] = None

    # a list of strings with node names that are expected to be in FusionGroups
    # inside of DifferentiableGraphs when this operation is autodiffed.
    # Ex: ['aten::add', 'aten::mm'], defaults to an empty list
    # Note: currently no ops use fusible nodes
    autodiff_fusible_nodes: List[str] = None

    # the following metadata relates to sparse support and is used in test_sparse.py

    # whether the op supports sparse coo inputs, defaults to False
    # TODO: rename supports_sparse to supports_sparse_coo
    supports_sparse: bool = None

    # only run tracing tests
    supports_scripting: bool = True

    # if the operator can be traced
    supports_tracing: bool = True

    # the following metadata relates to sparse compressed support and
    # is used in test_sparse_csr.py and test_sparse.py

    # whether the op supports sparse csr inputs, defaults to False
    supports_sparse_csr: bool = None
    # whether the op supports sparse csc inputs, defaults to False
    supports_sparse_csc: bool = None
    # whether the op supports sparse bsr inputs, defaults to False
    supports_sparse_bsr: bool = None
    # whether the op supports sparse bsc inputs, defaults to False
    supports_sparse_bsc: bool = None
    # whether the op supports nested jagged inputs, defaults to False
    supports_njt: bool = None

    # whether the op promotes integer inputs to float
    promotes_int_to_float: bool = False

    # the following metadata relates to complex support and is checked in test_ops.py

    test_conjugated_samples: bool = True

    test_neg_view: bool = True

    # assert that jit shape analysis fully propagates shape
    assert_jit_shape_analysis: bool = False

    # the following metadata relates to ExpandedWeights support and is checked in test_expanded_weights.py

    supports_expanded_weight: bool = False

    is_factory_function: bool = False

    def __post_init__(self):
        """Normalizes the metadata above: resolves dtype fallbacks, discovers
        op variants by name, wires default sample-input functions, and
        validates that the autograd flags are mutually consistent.
        """
        # Preserve the raw constructor arguments for later introspection.
        self._original_opinfo_args = asdict(self).copy()

        assert self.dtypes is not None, f"OpInfo for {self.name} has no dtypes!"

        dtypes_args = (
            self.dtypes,
            self.dtypesIfCUDA,
            self.dtypesIfROCM,
            self.dtypesIfXPU,
        )

        # Validates the dtypes are generated from the dispatch-related functions
        for dtype_list in dtypes_args:
            assert isinstance(dtype_list, (_dispatch_dtypes, type(None)))

        if self.aten_name is None:
            self.aten_name = self.name

        # Attribute to verify dynamic_dtypes are used.
        self.dynamic_dtypes = any(
            isinstance(dtypes, utils._dynamic_dispatch_dtypes) for dtypes in dtypes_args
        )

        if self.dynamic_dtypes:
            # Make sure `dtypesIfCUDA` is dynamic, if dynamic dispatch is used for CPU
            # This is because, below we set dtypesIfCUDA to dtypes if they are None.
            assert isinstance(self.dtypesIfCUDA, utils._dynamic_dispatch_dtypes), (
                f"To use dynamic dypes for operator {self.name}, "
                "acquire the dtypes dynamically for argument `dtypesIfCUDA`."
                "This is to ensure that CUDA dtypes are acquired correctly as they"
                "differ from CPU dtypes occasionally"
            )

        self.dtypes = set(self.dtypes)

        # NOTE: backward dtypes must be acquired before forward dtypes
        # since they fallback to explicit (not implicit!) specifications of
        # forward dtypes
        # Fallback chain (most to least specific):
        #   backward_dtypesIfROCM -> backward_dtypesIfCUDA -> backward_dtypes
        #   -> dtypesIfROCM -> dtypesIfCUDA -> dtypes
        self.backward_dtypesIfROCM = (
            set(self.backward_dtypesIfROCM)
            if self.backward_dtypesIfROCM is not None
            else (
                self.backward_dtypesIfCUDA
                if self.backward_dtypesIfCUDA is not None
                else self.backward_dtypes
                if self.backward_dtypes is not None
                else self.dtypesIfROCM
                if self.dtypesIfROCM is not None
                else self.dtypesIfCUDA
                if self.dtypesIfCUDA is not None
                else self.dtypes
            )
        )
        self.backward_dtypesIfCUDA = (
            set(self.backward_dtypesIfCUDA)
            if self.backward_dtypesIfCUDA is not None
            else (
                self.backward_dtypes
                if self.backward_dtypes is not None
                else self.dtypesIfCUDA
                if self.dtypesIfCUDA is not None
                else self.dtypes
            )
        )
        self.backward_dtypesIfHpu = (
            set(self.backward_dtypesIfHpu)
            if self.backward_dtypesIfHpu is not None
            else (
                self.backward_dtypes
                if self.backward_dtypes is not None
                else self.dtypes
            )
        )

        self.backward_dtypes = (
            set(self.backward_dtypes)
            if self.backward_dtypes is not None
            else self.dtypes
        )

        self.dtypesIfCUDA = (
            set(self.dtypesIfCUDA) if self.dtypesIfCUDA is not None else self.dtypes
        )
        self.dtypesIfROCM = (
            set(self.dtypesIfROCM)
            if self.dtypesIfROCM is not None
            else self.dtypesIfCUDA
        )
        self.dtypesIfXPU = (
            set(self.dtypesIfXPU) if self.dtypesIfXPU is not None else self.dtypesIfCUDA
        )

        self.dtypesIfHpu = (
            set(self.dtypesIfHpu) if self.dtypesIfHpu is not None else self.dtypes
        )

        # NOTE: if the op is unspecified it is assumed to be under the torch namespace
        if not self.op:
            self.op = _getattr_qual(torch, self.name)

        if self.method_variant is _NOTHING:
            self.method_variant = getattr(torch.Tensor, self.name, None)

        # attributes like real, imag are not callable
        if not callable(self.method_variant):
            self.method_variant = None

        if self.inplace_variant is _NOTHING:
            inplace_name = self.name + "_"
            self.inplace_variant = getattr(torch.Tensor, inplace_name, None)

        if self.operator_variant is _NOTHING:
            self.operator_variant = getattr(operator, self.name, None)

        if self.inplace_operator_variant is _NOTHING:
            # Note: operator.i<op> will use operator.<op> and assign the result to the lhs when no
            # __i<op>__ method is found. This results in the appearance of an inplace operator variant which
            # does not have the correct inplace behavior. To avoid this, we guard automatic detection of the inplace
            # operator with a check that an inplace variant exists.
            if self.inplace_variant is not None:
                inplace_operator_name = "i" + self.name
                self.inplace_operator_variant = getattr(
                    operator, inplace_operator_name, None
                )
            else:
                self.inplace_operator_variant = None

        self.decorators = (*self.decorators, *self.skips)

        # Specifying sample inputs function without specifying the
        # corresponding layout support implies the layout support:
        if self.supports_sparse is None:
            self.supports_sparse = self.sample_inputs_sparse_coo_func is not None
        if self.sample_inputs_sparse_coo_func is None:
            self.sample_inputs_sparse_coo_func = self._sample_inputs_unspecified

        if self.supports_sparse_csr is None:
            self.supports_sparse_csr = self.sample_inputs_sparse_csr_func is not None
        if self.sample_inputs_sparse_csr_func is None:
            self.sample_inputs_sparse_csr_func = self._sample_inputs_unspecified

        if self.supports_sparse_csc is None:
            self.supports_sparse_csc = self.sample_inputs_sparse_csc_func is not None
        if self.sample_inputs_sparse_csc_func is None:
            self.sample_inputs_sparse_csc_func = self._sample_inputs_unspecified

        if self.supports_sparse_bsr is None:
            self.supports_sparse_bsr = self.sample_inputs_sparse_bsr_func is not None
        if self.sample_inputs_sparse_bsr_func is None:
            self.sample_inputs_sparse_bsr_func = self._sample_inputs_unspecified

        if self.supports_sparse_bsc is None:
            self.supports_sparse_bsc = self.sample_inputs_sparse_bsc_func is not None
        if self.sample_inputs_sparse_bsc_func is None:
            self.sample_inputs_sparse_bsc_func = self._sample_inputs_unspecified

        if self.supports_njt is None:
            self.supports_njt = False

        # We run the sampling functions without tracking the gradients of the creation of inputs
        self.sample_inputs_func = torch.no_grad()(self.sample_inputs_func)
        self.sample_inputs_sparse_coo_func = torch.no_grad()(
            self.sample_inputs_sparse_coo_func
        )
        self.sample_inputs_sparse_csr_func = torch.no_grad()(
            self.sample_inputs_sparse_csr_func
        )
        self.sample_inputs_sparse_csc_func = torch.no_grad()(
            self.sample_inputs_sparse_csc_func
        )
        self.sample_inputs_sparse_bsr_func = torch.no_grad()(
            self.sample_inputs_sparse_bsr_func
        )
        self.sample_inputs_sparse_bsc_func = torch.no_grad()(
            self.sample_inputs_sparse_bsc_func
        )
        if self.reference_inputs_func is not None:
            self.reference_inputs_func = torch.no_grad()(self.reference_inputs_func)

        if not self.autodiff_fusible_nodes:
            self.autodiff_fusible_nodes = []

        if self.autodiff_nonfusible_nodes is None:
            self.autodiff_nonfusible_nodes = ["aten::" + self.name]

        # Autograd support

        # Autograd flags that depend on backward AD only
        # - If setting has been explicitly set, raise error if inconsistent
        if self.supports_gradgrad is None:
            self.supports_gradgrad = self.supports_autograd
        else:
            assert not (self.supports_gradgrad and not self.supports_autograd), (
                "supports_gradgrad refines the part of autograd is supported, so it should "
                "not be set if supports_autograd is False"
            )
        if self.check_batched_grad is None:
            self.check_batched_grad = self.supports_autograd or self.supports_forward_ad
        else:
            assert not (
                self.check_batched_grad
                and not (self.supports_autograd or self.supports_forward_ad)
            ), (
                "check_batched_grad refines the part of autograd that will be checked (by gradcheck), so "
                "it should not be set if supports_autograd is False"
            )
        if self.check_batched_gradgrad is None:
            self.check_batched_gradgrad = self.supports_gradgrad
        else:
            assert not (self.check_batched_gradgrad and not self.supports_gradgrad), (
                "check_batched_gradgrad refines the part of autograd that will be checked (by "
                "gradgradcheck), so it should not be set if either supports_gradgrad or supports_autograd "
                "is False."
            )
        if self.check_batched_forward_grad is None:
            self.check_batched_forward_grad = self.supports_forward_ad
        else:
            assert not (
                self.check_batched_forward_grad and not self.supports_forward_ad
            ), (
                "check_batched_forward_grad should only be used when supports_forward_ad "
                "is True. It is used to disable the test in the specific cases "
                "where the op supports forward ad but fails to compute "
                "batched forward grad."
            )

        if self.check_inplace_batched_forward_grad is None:
            self.check_inplace_batched_forward_grad = self.check_batched_forward_grad
        else:
            assert not (
                self.check_inplace_batched_forward_grad
                and not self.check_batched_forward_grad
            ), (
                "check_batched_forward_grad should only be used when check_batched_forward_grad "
                "is True. It is used to disable the test in the specific cases "
                "where the op supports batched forward grad but fails to compute batched forward "
                "grad for the inplace variant of the op."
            )

        assert not (self.supports_fwgrad_bwgrad and not self.supports_autograd), (
            "supports_fwgrad_bwgrad enables forward-over-backward gradgrad checks and should only be "
            "True if backward ad is also checked, i.e., supports_forward_ad should be True.",
            self.name,
        )

        # Autograd flags that depend on both forward AD and backward AD
        if self.supports_inplace_autograd is None:
            self.supports_inplace_autograd = (
                self.supports_autograd or self.supports_forward_ad
            )
        else:
            assert not (
                self.supports_inplace_autograd
                and not self.supports_autograd
                and not self.supports_forward_ad
            ), (
                "supports_inplace_autograd refines the part of autograd that is supported, so "
                "it should not be set if both supports_autograd and supports_forward_ad are False"
            )

        if self.aliases is not None:
            self.aliases = tuple(AliasInfo(a) for a in self.aliases)  # type: ignore[assignment]
        else:
            self.aliases = ()

    def __call__(self, *args, **kwargs):
        """Calls the function variant of the operator."""
        return self.op(*args, **kwargs)

    def __str__(self):
        return dataclass_repr(self)

    def get_op(self):
        """Returns the function variant of the operator, torch.<op_name>."""
        return self.op

    def get_method(self):
        """Returns the method variant of the operator, torch.Tensor.<op_name>.
        Returns None if the operator has no method variant.
        """
        return self.method_variant

    def get_inplace(self):
        """Returns the inplace variant of the operator, torch.Tensor.<op_name>_.
        Returns None if the operator has no inplace variant.
        """
        return self.inplace_variant

    def get_operator(self):
        """Returns operator variant of the operator, e.g. operator.neg
        Returns None if the operator has no operator variant.
        """
        return self.operator_variant

    def get_inplace_operator(self):
        """Returns the inplace operator variant of the operator, e.g operator.iadd
        Returns None if the operator has no inplace operator variant"""
        return self.inplace_operator_variant

    def conjugate_sample_inputs(self, device, dtype, requires_grad=False, **kwargs):
        """Returns an iterable of SampleInputs but with the tensor input or first
        tensor in a sequence input conjugated.
        """

        set_seed = kwargs.pop("set_seed", True)
        samples = self.sample_inputs_func(self, device, dtype, requires_grad, **kwargs)
        conj_samples = list(samples)

        def conjugate(tensor):
            # preserve the requires_grad flag across the conj() view
            _requires_grad = tensor.requires_grad
            tensor = tensor.conj()
            return tensor.requires_grad_(_requires_grad)

        # NOTE(review): the loop variable `sample` is immediately overwritten by
        # conj_samples[i]; iterating `samples` here only drives the index.
        for i, sample in enumerate(samples):
            sample = conj_samples[i]
            # Note: it is assumed that the input here is either a tensor or tensorlist
            if isinstance(sample.input, torch.Tensor):
                sample.input = conjugate(sample.input)
            else:
                sample.input[0] = conjugate(sample.input[0])

        return TrackedInputIter(
            iter(conj_samples),
            "conjugate sample input",
            set_seed=set_seed,
            restrict_to_index=OPINFO_SAMPLE_INPUT_INDEX,
        )

    def sample_inputs(self, device, dtype, requires_grad=False, **kwargs):
        """
        Returns an iterable of SampleInputs.

        These samples should be sufficient to test the function works correctly
        with autograd, TorchScript, etc.
        """
        set_seed = kwargs.pop("set_seed", True)
        samples = self.sample_inputs_func(self, device, dtype, requires_grad, **kwargs)

        if kwargs.get("include_conjugated_inputs", False):
            conj_samples = self.conjugate_sample_inputs(
                device, dtype, requires_grad, **kwargs
            )
            samples_list = list(samples)
            samples_list.extend(conj_samples)
            samples = tuple(samples_list)

        return TrackedInputIter(
            iter(samples),
            "sample input",
            set_seed=set_seed,
            restrict_to_index=OPINFO_SAMPLE_INPUT_INDEX,
        )

    def reference_inputs(self, device, dtype, requires_grad=False, **kwargs):
        """
        Returns an iterable of SampleInputs.

        Distinct from sample_inputs() above because this returns an expanded set
        of inputs when reference_inputs_func is defined. If undefined this returns
        the sample inputs.
        """
        set_seed = kwargs.pop("set_seed", True)
        if self.reference_inputs_func is None:
            samples = self.sample_inputs_func(
                self, device, dtype, requires_grad, **kwargs
            )
            return TrackedInputIter(
                iter(samples),
                "reference input",
                set_seed=set_seed,
                restrict_to_index=OPINFO_SAMPLE_INPUT_INDEX,
            )

        if kwargs.get("include_conjugated_inputs", False):
            raise NotImplementedError

        references = self.reference_inputs_func(
            self, device, dtype, requires_grad, **kwargs
        )
        return TrackedInputIter(
            iter(references),
            "reference input",
            set_seed=set_seed,
            restrict_to_index=OPINFO_SAMPLE_INPUT_INDEX,
        )

    def error_inputs(self, device, **kwargs):
        """
        Returns an iterable of ErrorInputs.
        """
        set_seed = kwargs.pop("set_seed", True)
        errs = self.error_inputs_func(self, device, **kwargs)
        return TrackedInputIter(
            iter(errs),
            "error input",
            callback=lambda e: e.sample_input,
            set_seed=set_seed,
            restrict_to_index=OPINFO_SAMPLE_INPUT_INDEX,
        )

    def error_inputs_sparse(self, device, layout, **kwargs):
        """
        Returns an iterable of ErrorInputs that contain sparse sample
        inputs with a specified layout.
        """
        if not self.supports_sparse_layout(layout):
            raise unittest.SkipTest("unsupported sparse layout")
        return self.error_inputs_sparse_func(self, device, layout, **kwargs)

    def supports_sparse_layout(self, layout):
        """Return True if OpInfo supports the specified sparse layout."""
        layout_name = str(layout).split(".")[-1]
        # map torch.sparse_coo to OpInfo.supports_sparse:
        layout_name = layout_name.replace("_coo", "")
        return getattr(self, f"supports_{layout_name}")

    def sample_inputs_sparse(
        self, layout, device, dtype, requires_grad=False, **kwargs
    ):
        """Returns an iterable of SampleInputs that contain inputs with a
        specified sparse layout.
        """
        layout_name = str(layout).split(".")[-1]
        sample_inputs_mth = getattr(self, "sample_inputs_" + layout_name)

        def non_empty_sampler(op, generator):
            # skip (rather than vacuously pass) tests when no samples exist
            found_sample = False
            for sample in generator:
                found_sample = True
                yield sample
            if not found_sample:
                raise unittest.SkipTest("NO SAMPLES!")

        return non_empty_sampler(
            self,
            sample_inputs_mth(device, dtype, requires_grad=requires_grad, **kwargs),
        )

    def _sample_inputs_unspecified(self, *args, **kwargs):
        """Raises a NotImplementedError in an OpInfo instance creation
        that specifies supports_sparse(|_csr|_csc|_bsr|_bsc)=True
        without specifying the corresponding sample function as
        sample_inputs_sparse_(coo|csr|csc|bsr|bsc)_func.

        To avoid this, either define the corresponding sample function,
        or re-map unsupported samples to error inputs in an appropriate

        opinfo/definitions/sparse.py:_validate_sample_input_sparse_<op_name>

        function.
        """
        raise NotImplementedError("no sample function specified")

    def sample_inputs_sparse_coo(self, device, dtype, requires_grad=False, **kwargs):
        """Returns an iterable of SampleInputs that contain inputs with sparse
        coo layout.
        """
        return self.sample_inputs_sparse_coo_func(
            self, device, dtype, requires_grad, **kwargs
        )

    def sample_inputs_sparse_csr(self, device, dtype, requires_grad=False, **kwargs):
        """Returns an iterable of SampleInputs that contain inputs with sparse
        csr layout.
        """
        return self.sample_inputs_sparse_csr_func(
            self, device, dtype, requires_grad, **kwargs
        )

    def sample_inputs_sparse_csc(self, device, dtype, requires_grad=False, **kwargs):
        """Returns an iterable of SampleInputs that contain inputs with sparse
        csc layout.
        """
        return self.sample_inputs_sparse_csc_func(
            self, device, dtype, requires_grad, **kwargs
        )

    def sample_inputs_sparse_bsr(self, device, dtype, requires_grad=False, **kwargs):
        """Returns an iterable of SampleInputs that contain inputs with sparse
        bsr layout.
        """
        return self.sample_inputs_sparse_bsr_func(
            self, device, dtype, requires_grad, **kwargs
        )

    def sample_inputs_sparse_bsc(self, device, dtype, requires_grad=False, **kwargs):
        """Returns an iterable of SampleInputs that contain inputs with sparse
        bsc layout.
+ """ + return self.sample_inputs_sparse_bsc_func( + self, device, dtype, requires_grad, **kwargs + ) + + def get_decorators(self, test_class, test_name, device, dtype, param_kwargs): + """Returns the decorators targeting the given test.""" + result = [] + for decorator in self.decorators: + if isinstance(decorator, DecorateInfo): + if decorator.is_active( + test_class, test_name, device, dtype, param_kwargs + ): + result.extend(decorator.decorators) + else: + result.append(decorator) + return result + + def supported_dtypes(self, device_type): + if device_type == "privateuse1": + device_type = torch._C._get_privateuse1_backend_name() + device_type = torch.device(device_type).type + if device_type == "cuda": + return self.dtypesIfROCM if TEST_WITH_ROCM else self.dtypesIfCUDA + if device_type == "xpu": + return self.dtypesIfXPU + if device_type == "hpu": + return self.dtypesIfHpu + return self.dtypes + + def supported_backward_dtypes(self, device_type): + if not self.supports_autograd: + return set() + + if device_type == "privateuse1": + device_type = torch._C._get_privateuse1_backend_name() + device_type = torch.device(device_type).type + backward_dtypes = None + if device_type == "cuda": + backward_dtypes = ( + self.backward_dtypesIfROCM + if TEST_WITH_ROCM + else self.backward_dtypesIfCUDA + ) + elif device_type == "hpu": + backward_dtype = self.backward_dtypesIfHpu + else: + backward_dtypes = self.backward_dtypes + + allowed_backward_dtypes = floating_and_complex_types_and( + torch.bfloat16, torch.float16, torch.complex32 + ) + return set(allowed_backward_dtypes).intersection(backward_dtypes) + + def supports_dtype(self, dtype, device_type) -> bool: + return dtype in self.supported_dtypes(device_type) + + @property + def full_name(self): + """Returns a full name that helps to uniquely identify this OpInfo.""" + variant = "." 
+ self.variant_test_name if self.variant_test_name else "" + # example: "normal.in_place" where "normal" is the name and "in_place" is the variant + return f"{self.name}{variant}" + + @property + def formatted_name(self): + """Returns a formatted full name for this OpInfo that can be used in test names.""" + return self.full_name.replace(".", "_") + + +def _generate_reduction_inputs(device, dtype, requires_grad, **kwargs): + """Generates input tensors for testing reduction operators""" + yield make_tensor([], dtype=dtype, device=device, requires_grad=requires_grad) + yield make_tensor([2], dtype=dtype, device=device, requires_grad=requires_grad) + yield make_tensor([3, 5], dtype=dtype, device=device, requires_grad=requires_grad) + yield make_tensor( + [3, 2, 1, 2], dtype=dtype, device=device, requires_grad=requires_grad + ) + + +def _generate_reduction_kwargs(ndim, supports_multiple_dims=True): + """Generates a subset of all valid dim and keepdim kwargs given ndim that + is appropriate for testing reduction operators. + """ + + # Test default dim and keepdim + yield {} + + # Test reducing inner and outer most dimensions + yield {"dim": 0, "keepdim": True} + yield {"dim": -1, "keepdim": False} + + # Test reducing middle dimension + if ndim > 2: + yield {"dim": ndim // 2, "keepdim": True} + + if supports_multiple_dims: + # Test reducing all dimensions + yield {"dim": tuple(range(ndim)), "keepdim": False} + + # Test reducing both first and last dimensions + if ndim > 1: + yield {"dim": (0, -1), "keepdim": True} + + # Test reducing every other dimension starting with the second + if ndim > 3: + yield {"dim": tuple(range(1, ndim, 2)), "keepdim": False} + + +def sample_inputs_reduction(op_info, device, dtype, requires_grad, **kwargs): + """Sample inputs for reduction operators.""" + + # TODO(@heitorschueroff) Once all reduction operators are using + # ReductionOpInfo use op_info.supports_multiple_dims directly. 
    supports_multiple_dims: bool = kwargs.get("supports_multiple_dims", True)

    # TODO(@heitorschueroff) Once all reduction operators are using ReductionOpInfo
    # use op_info.generate_args_kwargs directly.
    generate_args_kwargs = kwargs.get(
        "generate_args_kwargs", lambda *args, **kwargs: (yield (), {})
    )

    for t in _generate_reduction_inputs(device, dtype, requires_grad):
        for reduction_kwargs in _generate_reduction_kwargs(
            t.ndim, supports_multiple_dims
        ):
            # NOTE: the loop target deliberately shadows the outer `kwargs`;
            # each sample starts from the generator-provided kwargs with the
            # dim/keepdim arguments merged on top.
            for args, kwargs in generate_args_kwargs(t, **reduction_kwargs):
                kwargs.update(reduction_kwargs)
                yield SampleInput(
                    t.detach().requires_grad_(requires_grad), args=args, kwargs=kwargs
                )


# NOTE [Reductions]:
#
# For testing purposes, we relax the definition of a reduction operator
# as defined in the docstring below. We do this to capture operators with
# a similar API so they can be tested automatically. However...
#
# Strictly speaking a reduction operator is an operator that can reduce an
# array to a single scalar value and that can be computed from the partial
# result of reducing subarrays. This usually means that the reduction operation
# should be commutative and associative. This definition is important when it
# comes to implementation as it determines how a reduction can be parallelized.
#
# For example, many summary statistics such as median, mode and quantile cannot
# be computed from partial results because these are sorting and counting based
# algorithms that need information that would be lost in the reduced value.
class ReductionOpInfo(OpInfo):
    """Reduction operator information.

    An operator is a reduction operator if it reduces one or more dimensions of
    the input tensor to a single value. Reduction operators must implement the
    following signature:

    - `op(input, *args, *, dim=None, keepdim=False, **kwargs) -> Tensor`

    ReductionOpInfo tests that reduction operators implement a consistent API.
    Optional features such as reducing over multiple dimensions are captured in
    the optional keyword parameters of the ReductionOpInfo constructor.

    If a reduction operator does not yet implement the full required API of
    reduction operators, this should be documented by xfailing the failing
    tests rather than adding optional parameters to ReductionOpInfo.

    NOTE
    The API for reduction operators has not yet been finalized and some
    requirements may change.

    See tests in test/test_reductions.py
    """

    def __init__(
        self,
        name,
        *,
        # The identity value for the operator if it has one.
        identity: Optional[Any] = None,
        # The nan policy for the operator if it implements one.
        # - propagate: NaN values are propagated to the output
        # - omit: NaN values are discarded during the reduction
        nan_policy: Optional[str] = None,
        # Whether the operator supports reducing multiple dimensions.
        supports_multiple_dims: bool = True,
        # Whether the operator promotes integral to floating point dtypes.
        promotes_int_to_float: bool = False,
        # Whether the operator promotes all integral dtypes to int64.
        promotes_int_to_int64: bool = False,
        # If a specific dtype is given, then the operator always returns that
        # dtype irrespective of the input dtype. If None, the operator returns
        # the dtype according to the type promotion rules above.
        result_dtype: Optional[torch.dtype] = None,
        # Casts complex results to real (e.g. linalg.norm or torch.var)
        complex_to_real: bool = False,
        # ReductionOpInfo tests generate their own input, dim and keepdim
        # arguments and call this function to generate tuples of extra args and
        # kwargs to use when calling the op. This is required for operators that
        # have other required parameters besides the input tensor.
        generate_args_kwargs: Callable = lambda t, dim=None, keepdim=False: (
            yield (),
            {},
        ),
        # Options from the OpInfo base class
        **kwargs,
    ):
        self._original_reduction_args = locals().copy()
        assert nan_policy in (None, "propagate", "omit")

        # These are mutually exclusive options
        assert not (result_dtype and promotes_int_to_float)
        assert not (result_dtype and promotes_int_to_int64)
        assert not (result_dtype and complex_to_real)
        assert not (promotes_int_to_float and promotes_int_to_int64)

        # Default sample_inputs_func for ReductionOpInfo which augments sample
        # inputs from sample_inputs_reduction with the args and kwargs from
        # generate_args_kwargs. This is only used if sample_inputs_func is None.
        def sample_inputs_func(*args, **kwargs):
            kwargs["supports_multiple_dims"] = supports_multiple_dims
            kwargs["generate_args_kwargs"] = generate_args_kwargs
            yield from sample_inputs_reduction(*args, **kwargs)

        # Override OpInfo defaults and call base class __init__
        kwargs.setdefault("inplace_variant", None)
        kwargs.setdefault("sample_inputs_func", sample_inputs_func)
        super().__init__(name, promotes_int_to_float=promotes_int_to_float, **kwargs)

        self.identity = identity
        self.nan_policy = nan_policy
        self.supports_multiple_dims = supports_multiple_dims
        self.promotes_int_to_int64 = promotes_int_to_int64
        self.complex_to_real = complex_to_real
        self.result_dtype = result_dtype
        self.generate_args_kwargs = generate_args_kwargs


# The base reference input generation for elementwise binary operations
def _reference_inputs_elementwise_binary(
    op, device, dtype, requires_grad, exclude_zero, **kwargs
):
    yield from op.sample_inputs_func(op, device, dtype, requires_grad, **kwargs)
    yield from generate_elementwise_binary_tensors(
        op,
        device=device,
        dtype=dtype,
        requires_grad=requires_grad,
        exclude_zero=exclude_zero,
    )
    if dtype is not torch.bool:
        yield from generate_elementwise_binary_small_value_tensors(
op, device=device, dtype=dtype, requires_grad=requires_grad + ) + if dtype not in (torch.bool, torch.uint8, torch.int8): + yield from generate_elementwise_binary_large_value_tensors( + op, device=device, dtype=dtype, requires_grad=requires_grad + ) + yield from generate_elementwise_binary_broadcasting_tensors( + op, + device=device, + dtype=dtype, + requires_grad=requires_grad, + exclude_zero=exclude_zero, + ) + yield from generate_elementwise_binary_with_scalar_samples( + op, device=device, dtype=dtype, requires_grad=requires_grad + ) + + yield from generate_elementwise_binary_with_scalar_and_type_promotion_samples( + op, device=device, dtype=dtype, requires_grad=requires_grad + ) + + if dtype.is_floating_point or dtype.is_complex: + yield from generate_elementwise_binary_extremal_value_tensors( + op, device=device, dtype=dtype, requires_grad=requires_grad + ) + + +# Note that these references inputs use scalars for the SampleInput.input value, +# and many tests require SampleInput.input be a tensor or a list of tensors +def reference_inputs_elementwise_binary(op, device, dtype, requires_grad, **kwargs): + if hasattr(op, "rhs_make_tensor_kwargs"): + exclude_zero = op.rhs_make_tensor_kwargs.get("exclude_zero", False) + + gen = partial( + _reference_inputs_elementwise_binary, + op, + device, + dtype, + requires_grad, + exclude_zero, + **kwargs, + ) + + # yields "normal" samples + yield from gen() + + # yields noncontiguous samples + for sample in gen(): + yield sample.noncontiguous() + + yield from generate_elementwise_binary_noncontiguous_tensors( + op, + device=device, + dtype=dtype, + requires_grad=requires_grad, + exclude_zero=exclude_zero, + ) + + yield from generate_elementwise_binary_arbitrarily_strided_tensors( + op, + device=device, + dtype=dtype, + requires_grad=requires_grad, + exclude_zero=exclude_zero, + ) + + +# A functional that extends an elementwise binary operator's bespoke error inputs +# with generic error inputs for the class of elementwise 
binary operations +def make_error_inputs_elementwise_binary(error_inputs_func): + def error_inputs_func_wrapper(op, device, **kwargs): + if error_inputs_func is not None: + yield from error_inputs_func(op, device, **kwargs) + + if not op.supports_rhs_python_scalar: + si = SampleInput(torch.tensor((1, 2, 3), device=device), args=(2,)) + yield ErrorInput(si, error_type=Exception, error_regex="") + + if not op.supports_one_python_scalar: + si = SampleInput(2, args=(torch.tensor((1, 2, 3), device=device),)) + yield ErrorInput(si, error_type=Exception, error_regex="") + + if ( + not kwargs.get("skip_two_python_scalars", False) + and not op.supports_two_python_scalars + ): + si = SampleInput(2, args=(3,)) + yield ErrorInput(si, error_type=Exception, error_regex="") + + return error_inputs_func_wrapper + + +# The following functions and classes are for testing elementwise binary operators. + + +# Returns a generator of pairs of contiguous tensors on the requested device +# and with the requested dtype. +# +# This function is intended to test the non-vectorized and vectorized code +# paths of elementwise binary functions, as well as their handling of odd tensor +# sizes (like zero-dim tensors and tensors with zero elements). +# +# Each iterable will include an a tensor with no elements, +# zero dim (scalar) tensors, small 1D tensors, a medium 1D tensor, and +# a large 2D tensor. 
def generate_elementwise_binary_tensors(
    op, *, device, dtype, requires_grad=False, exclude_zero=False
):
    """Yields SampleInputs pairing equal-shaped contiguous lhs/rhs tensors."""
    # Shapes chosen to hit empty, scalar, small/medium 1D, and large 2D cases.
    shapes = (
        (0,),  # tensor with no elements
        (1, 0, 3),  # multi-dim tensor with no elements
        (),  # zero dim (scalar) tensor
        (20,),  # small 1D tensor
        (812,),  # medium 1D tensor
        (1029, 917),  # large 2D tensor
    )

    tensor_factory = partial(
        make_tensor,
        device=device,
        dtype=dtype,
        requires_grad=requires_grad,
        exclude_zero=exclude_zero,
    )
    for shape in shapes:
        yield SampleInput(
            tensor_factory(shape, **op.lhs_make_tensor_kwargs),
            args=(tensor_factory(shape, **op.rhs_make_tensor_kwargs),),
        )


def generate_elementwise_binary_arbitrarily_strided_tensors(
    op, *, device, dtype, requires_grad=False, exclude_zero=False
):
    """Yields SampleInputs whose lhs is an arbitrarily strided view of a flat buffer."""
    # (shape, strides, storage offset) triples
    strided_cases = (
        ((5, 6, 2), (1, 1, 7), 2),
        ((5, 5, 4), (1, 1, 7), 2),
        ((5, 5, 2), (4, 5, 7), 3),
        ((5, 5, 2), (5, 5, 7), 3),
        ((5, 5, 2), (5, 5, 5), 3),
        ((9, 5, 2), (0, 1, 7), 3),
    )

    tensor_factory = partial(
        make_tensor,
        device=device,
        dtype=dtype,
        requires_grad=requires_grad,
        exclude_zero=exclude_zero,
    )
    for shape, strides, offset in strided_cases:
        lhs = tensor_factory(500).as_strided(shape, strides, offset)
        rhs = tensor_factory(shape)
        yield SampleInput(lhs, args=(rhs,))


# Returns a generator of pairs of contiguous tensors on the requested device and with
# the requested dtype.
#
# Unlike the previous function, the values in these tensors are specified manually.
+def generate_elementwise_binary_small_value_tensors( + op, *, device, dtype, requires_grad=False, exclude_zero=None +): + if exclude_zero is None: + if hasattr(op, "rhs_make_tensor_kwargs"): + exclude_zero = op.rhs_make_tensor_kwargs.get("exclude_zero", False) + + # defines interesting values + _unsigned_int_vals = (0, 1, 55, 127, 128, 190, 210, 220, 254) + _int_vals = (0, -1, 1, -55, 55, -127, 127, -128) + _float_vals = ( + 0.0, + -0.0, + -0.001, + 0.001, + -0.25, + 0.25, + -1.0, + 1.0, + -math.pi / 2, + math.pi / 2, + -math.pi + 0.00001, + math.pi - 0.00001, + -math.pi, + math.pi, + -math.pi - 0.00001, + math.pi + 0.00001, + ) + + l_vals = [] + r_vals = [] + + if dtype.is_floating_point: + prod = product(_float_vals, _float_vals) + elif dtype.is_complex: + complex_vals = product(_float_vals, _float_vals) + # Note the use of list is required here or the map generator will be + # emptied by the following product and it won't produce the desired cross-product + complex_vals = [complex(*x) for x in complex_vals] + prod = product(complex_vals, complex_vals) + elif dtype in (torch.int8, torch.int16, torch.int32, torch.int64): + prod = product(_int_vals, _int_vals) + elif dtype is torch.uint8: + prod = product(_unsigned_int_vals, _unsigned_int_vals) + else: + raise ValueError("Unsupported dtype!") + + for l, r in prod: + l_vals.append(l) + if r == 0 and exclude_zero: + r_vals.append(1) + else: + r_vals.append(r) + + lhs = torch.tensor(l_vals, device=device, dtype=dtype, requires_grad=requires_grad) + rhs = torch.tensor(r_vals, device=device, dtype=dtype, requires_grad=requires_grad) + + yield SampleInput(lhs, args=(rhs,)) + + +def generate_elementwise_binary_large_value_tensors( + op, *, device, dtype, requires_grad=False +): + _large_int_vals = (-1113, 1113, -10701, 10701) + _large_float16_vals = (-501, 501, -1001.2, 1001.2, -13437.7, 13437.7) + _large_float_vals = _large_float16_vals + (-4988429.2, 4988429.2, -1e20, 1e20) + + l_vals = [] + r_vals = [] + + if dtype == 
torch.float16: + prod = product(_large_float16_vals, _large_float16_vals) + elif dtype.is_floating_point: + prod = product(_large_float_vals, _large_float_vals) + elif dtype.is_complex: + complex_vals = product(_large_float_vals, _large_float_vals) + # Note the use of list is required here or the map generator will be + # emptied by the following product and it won't produce the desired cross-product + complex_vals = [complex(*x) for x in complex_vals] + prod = product(complex_vals, complex_vals) + elif dtype in (torch.int16, torch.int32, torch.int64): + prod = product(_large_int_vals, _large_int_vals) + else: + raise ValueError("Unsupported dtype!") + + for l, r in prod: + l_vals.append(l) + r_vals.append(r) + + lhs = torch.tensor(l_vals, device=device, dtype=dtype, requires_grad=requires_grad) + rhs = torch.tensor(r_vals, device=device, dtype=dtype, requires_grad=requires_grad) + + yield SampleInput(lhs, args=(rhs,)) + + +def generate_elementwise_binary_extremal_value_tensors( + op, *, device, dtype, requires_grad=False +): + _float_extremals = (float("inf"), float("-inf"), float("nan")) + + l_vals = [] + r_vals = [] + + if dtype.is_floating_point: + prod = product(_float_extremals, _float_extremals) + elif dtype.is_complex: + complex_vals = product(_float_extremals, _float_extremals) + # Note the use of list is required here or the map generator will be + # emptied by the following product and it won't produce the desired cross-product + complex_vals = [complex(*x) for x in complex_vals] + prod = product(complex_vals, complex_vals) + else: + raise ValueError("Unsupported dtype!") + + for l, r in prod: + l_vals.append(l) + r_vals.append(r) + + lhs = torch.tensor(l_vals, device=device, dtype=dtype, requires_grad=requires_grad) + rhs = torch.tensor(r_vals, device=device, dtype=dtype, requires_grad=requires_grad) + + yield SampleInput(lhs, args=(rhs,)) + + # Test case for NaN propagation + nan = ( + float("nan") if dtype.is_floating_point else complex(float("nan"), 
float("nan")) + ) + lhs = make_tensor( + (128, 128), device=device, dtype=dtype, requires_grad=requires_grad + ) + lhs.view(-1)[::3] = nan + rhs = make_tensor( + (128, 128), device=device, dtype=dtype, requires_grad=requires_grad + ) + rhs.view(-1)[::3] = nan + + yield SampleInput(lhs, args=(rhs,)) + + +# Returns a generator of pairs of contiguous and noncontiguous tensors that +# require broadcasting +def generate_elementwise_binary_broadcasting_tensors( + op, *, device, dtype, requires_grad=False, exclude_zero=False +): + shapes = ( + ((1,), ()), + ((2,), ()), + ((1,), (2,)), + ((2, 1), (2,)), + ((1, 2), (2,)), + ((3, 2), (2,)), + ((1, 3, 2), (2,)), + ((1, 3, 2), (3, 2)), + ((3, 1, 2), (3, 2)), + ((2, 3, 2), ()), + ((3, 1, 2), (1, 3, 2)), + ) + + make_arg = partial( + make_tensor, + device=device, + dtype=dtype, + requires_grad=requires_grad, + exclude_zero=exclude_zero, + ) + for shape, noncontiguous in product(shapes, [True, False]): + shape_lhs, shape_rhs = shape + lhs = make_arg( + shape_lhs, noncontiguous=noncontiguous, **op.lhs_make_tensor_kwargs + ) + rhs = make_arg( + shape_rhs, noncontiguous=noncontiguous, **op.rhs_make_tensor_kwargs + ) + + yield SampleInput(lhs, args=(rhs,), broadcasts_input=True) + + +# Returns a generator of pairs of contiguous tensors and scalars +def generate_elementwise_binary_with_scalar_samples( + op, *, device, dtype, requires_grad=False +): + make_arg = partial( + make_tensor, device=device, dtype=dtype, requires_grad=requires_grad + ) + + shapes = ((), (3,), (5, 3), (0, 1, 3), (1, 5)) + if op.supports_rhs_python_scalar: + for shape in shapes: + lhs = make_arg(shape, **op.lhs_make_tensor_kwargs) + rhs = make_arg(shape, **op.rhs_make_tensor_kwargs) + lhs_scalar = make_arg((), **op.lhs_make_tensor_kwargs).item() + rhs_scalar = make_arg((), **op.rhs_make_tensor_kwargs).item() + + yield SampleInput(lhs, args=(rhs_scalar,)) + + # Extends with scalar lhs + if op.supports_one_python_scalar: + yield SampleInput(lhs_scalar, 
args=(rhs,)) + + if op.supports_two_python_scalars: + lhs_scalar = make_arg((), **op.lhs_make_tensor_kwargs).item() + rhs_scalar = make_arg((), **op.rhs_make_tensor_kwargs).item() + + yield SampleInput(lhs_scalar, args=(rhs_scalar,)) + + +# Returns a generator of pairs of contiguous tensors and 0d tensors and scalars and type promotion +def generate_elementwise_binary_with_scalar_and_type_promotion_samples( + op, *, device, dtype, requires_grad=False +): + # add these samples only for logical and comparison ops, arithmetic ops are not happy about extremal scalars + if op.name in ( + "eq", + "ne", + "gt", + "ge", + "lt", + "le", + "logical_and", + "logical_or", + "logical_xor", + ): + make_arg = partial( + make_tensor, device=device, dtype=dtype, requires_grad=requires_grad + ) + shape = ( + 23, + ) # this shape is big enough to trigger vectorization, and has non-vectorized tail + values = (float("nan"), float("inf"), -float("inf")) + scalar_tensors = tuple(torch.tensor(val) for val in values) + if op.supports_rhs_python_scalar: + lhs = make_arg(shape, **op.lhs_make_tensor_kwargs) + rhs = make_arg(shape, **op.rhs_make_tensor_kwargs) + for scalar in values + scalar_tensors: + yield SampleInput(lhs, args=(scalar,)) + # Extends with scalar lhs + if op.supports_one_python_scalar: + yield SampleInput(scalar, args=(rhs,)) + + +# Returns a generator of pairs of noncontiguous tensors +def generate_elementwise_binary_noncontiguous_tensors( + op, *, device, dtype, requires_grad=False, exclude_zero=False +): + make_arg = partial( + make_tensor, + device=device, + dtype=dtype, + requires_grad=requires_grad, + exclude_zero=exclude_zero, + ) + + # Generic noncontiguity + lhs = make_arg((1026,), noncontiguous=True, **op.lhs_make_tensor_kwargs) + rhs = make_arg((1026,), noncontiguous=True, **op.rhs_make_tensor_kwargs) + + yield SampleInput(lhs.clone(), args=(rhs.clone(),)) + yield SampleInput(lhs.contiguous(), args=(rhs,)) + + # Transposed + lhs = make_arg((789, 357), 
**op.lhs_make_tensor_kwargs) + rhs = make_arg((789, 357), **op.rhs_make_tensor_kwargs) + + yield SampleInput(lhs.T, args=(rhs.T,)) + + # More noncontiguity + shapes = ((5, 7), (1024,)) + + for shape in shapes: + lhs = make_arg(shape, **op.lhs_make_tensor_kwargs) + rhs = make_arg(shape, **op.rhs_make_tensor_kwargs) + + lhs_non_contig = torch.empty(shape + (2,), device=device, dtype=dtype)[..., 0] + lhs_non_contig.copy_(lhs) + + rhs_non_contig = torch.empty(shape + (2,), device=device, dtype=dtype)[..., 0] + rhs_non_contig.copy_(rhs) + + yield SampleInput(lhs_non_contig.clone(), args=(rhs_non_contig.clone(),)) + yield SampleInput(lhs_non_contig.contiguous(), args=(rhs_non_contig,)) + + # Noncontiguous indices + shape = (2, 2, 1, 2) + lhs = make_arg(shape, **op.lhs_make_tensor_kwargs) + rhs = make_arg(shape, **op.rhs_make_tensor_kwargs) + + lhs_non_contig = lhs[:, 1, ...] + rhs_non_contig = rhs[:, 1, ...] + + yield SampleInput(lhs_non_contig.clone(), args=(rhs_non_contig.clone(),)) + yield SampleInput(lhs_non_contig.contiguous(), args=(rhs_non_contig,)) + + # Expanded tensors + shapes = ((1, 3), (1, 7), (5, 7)) + + for shape in shapes: + lhs = make_arg(shape, **op.lhs_make_tensor_kwargs) + rhs = make_arg(shape, **op.rhs_make_tensor_kwargs) + + lhs_non_contig = lhs.expand(3, -1, -1) + rhs_non_contig = rhs.expand(3, -1, -1) + + yield SampleInput(lhs_non_contig, args=(rhs_non_contig,)) + + +# Sample inputs for elementwise binary operators, like add +def sample_inputs_elementwise_binary(op, device, dtype, requires_grad, **kwargs): + _M = S if kwargs.get("small_inputs_only", False) else M + _S = XS if kwargs.get("small_inputs_only", False) else S + + if hasattr(op, "rhs_make_tensor_kwargs"): + exclude_zero = op.rhs_make_tensor_kwargs.get("exclude_zero", False) + + make_arg = partial( + make_tensor, + device=device, + dtype=dtype, + requires_grad=requires_grad, + exclude_zero=exclude_zero, + ) + + shapes = ( + ((), ()), + ((_S,), ()), + ((_S, 1), (_S,)), + ((_M, _S), ()), + 
        ((_S, _M, _S), (_M, _S)),
        ((_S, _M, _S), (_S, _M, _S)),
        ((_M, 1, _S), (_M, _S)),
        ((_M, 1, _S), (1, _M, _S)),
        ((0, 1, XS), (0, _M, XS)),
    )

    sample_kwargs = kwargs.get("sample_kwargs", {})

    for shape_lhs, shape_rhs in shapes:
        lhs = make_arg(shape_lhs, **op.lhs_make_tensor_kwargs)
        rhs = make_arg(shape_rhs, **op.rhs_make_tensor_kwargs)
        # lhs is broadcast whenever its shape is not already the broadcasted result
        broadcasts_input = shape_lhs != torch.broadcast_shapes(shape_lhs, shape_rhs)

        yield SampleInput(
            lhs, args=(rhs,), kwargs=sample_kwargs, broadcasts_input=broadcasts_input
        )


# Metadata class for binary "universal functions (ufuncs)" that accept two
# tensor and have common properties
class BinaryUfuncInfo(OpInfo):
    """Operator information for 'universal binary functions (binary ufuncs).'
    These are functions of two tensors with common properties like:
    - they are elementwise functions
    - the output shape is determined by the input shape
    - they typically have method and inplace variants
    - they typically support the out kwarg
    - they typically have NumPy or SciPy references
    See NumPy's universal function documentation
    (https://numpy.org/doc/stable/reference/ufuncs.html) for more details
    about the concept of ufuncs.
    """

    def __init__(
        self,
        name,
        *,
        sample_inputs_func=sample_inputs_elementwise_binary,
        reference_inputs_func=reference_inputs_elementwise_binary,
        error_inputs_func=None,
        lhs_make_tensor_kwargs=None,
        rhs_make_tensor_kwargs=None,
        always_returns_bool=False,  # Set to true if the op always returns bool tensors
        supports_rhs_python_scalar=True,  # Whether the operator allows Tensor x scalar inputs
        supports_one_python_scalar=False,  # Whether the operator allows scalar x tensor and tensor x scalar inputs
        supports_two_python_scalars=False,  # Whether the operator allows scalar x scalar inputs
        **kwargs,
    ):
        self._original_binary_ufunc_args = locals().copy()

        # Elementwise binary operations perform the equivalent of test_numpy_refs
        # in test_binary_ufuncs, but with additional test granularity. So the
        # generic test_ops.py test is skipped because it's redundant.
        common_skips = (
            DecorateInfo(
                unittest.skip("Skipping redundant test."),
                "TestCommon",
                "test_numpy_refs",
            ),
        )
        kwargs["skips"] = kwargs.get("skips", ()) + common_skips
        super().__init__(
            name,
            sample_inputs_func=sample_inputs_func,
            reference_inputs_func=reference_inputs_func,
            error_inputs_func=make_error_inputs_elementwise_binary(error_inputs_func),
            **kwargs,
        )

        # [lr]hs_make_tensor_kwargs are part of the OpInfo to be able to dynamically generate valid samples later on.
        if lhs_make_tensor_kwargs is None:
            lhs_make_tensor_kwargs = {}
        self.lhs_make_tensor_kwargs = lhs_make_tensor_kwargs

        if rhs_make_tensor_kwargs is None:
            rhs_make_tensor_kwargs = {}
        self.rhs_make_tensor_kwargs = rhs_make_tensor_kwargs

        self.always_returns_bool = always_returns_bool
        self.supports_rhs_python_scalar = supports_rhs_python_scalar
        self.supports_one_python_scalar = supports_one_python_scalar
        self.supports_two_python_scalars = supports_two_python_scalars

        # scalar x scalar support implies scalar x tensor support
        if self.supports_two_python_scalars:
            self.supports_one_python_scalar = True

        if self.supports_one_python_scalar:
            assert (
                supports_rhs_python_scalar
            ), "Can't support lhs and rhs Python scalars but not rhs scalars!"


# The following functions and classes are for testing elementwise unary operators.
def sample_inputs_elementwise_unary(
    op_info, device, dtype, requires_grad, op_kwargs=None, **kwargs
):
    if not op_kwargs:
        op_kwargs = {}

    _L = S if kwargs.get("small_inputs_only", False) else L

    # Restrict values to the op's domain, shrunk by _domain_eps for
    # floating/complex dtypes to stay strictly inside open boundaries.
    low, high = op_info.domain
    is_floating = dtype.is_floating_point or dtype.is_complex
    low = low if low is None or not is_floating else low + op_info._domain_eps
    high = high if high is None or not is_floating else high - op_info._domain_eps
    if (
        op_info.supports_sparse_csr
        or op_info.supports_sparse_csc
        or op_info.supports_sparse_bsr
        or op_info.supports_sparse_bsc
    ):
        # Tensors with dim=2 for sparse compressed testing
        yield SampleInput(
            make_tensor(
                (_L, _L),
                device=device,
                dtype=dtype,
                low=low,
                high=high,
                requires_grad=requires_grad,
            ),
            kwargs=op_kwargs,
        )
    else:
        # Creates a 1D, empty, and scalar tensor
        for shape in ((_L,), (1, 0, 3), ()):
            yield SampleInput(
                make_tensor(
                    shape,
                    device=device,
                    dtype=dtype,
                    low=low,
                    high=high,
                    requires_grad=requires_grad,
                ),
                kwargs=op_kwargs,
            )


# Replace values satisfying condition with a safe value.
# This is used to block
# out values that could cause singularity like tan(pi/2)
def _replace_values_in_tensor(tensor, condition, safe_value):
    """In-place: set every element where condition(tensor) is True to safe_value."""
    tensor.masked_fill_(condition(tensor), safe_value)


# Helper to create a unary elementwise tensor with valid inputs
def _make_unary_elementwise_tensor(shape, *, op, dtype, **kwargs):
    lo, hi = op.domain
    fp_like = dtype.is_floating_point or dtype.is_complex
    # Shrink open floating-point domains by the op's epsilon
    if fp_like and lo is not None:
        lo = lo + op._domain_eps
    if fp_like and hi is not None:
        hi = hi - op._domain_eps

    result = make_tensor(shape, low=lo, high=hi, dtype=dtype, **kwargs)

    if op.reference_numerics_filter is not None and dtype is not torch.bool:
        condition, safe_value = op.reference_numerics_filter
        _replace_values_in_tensor(result, condition, safe_value)

    return result


# Restricts the values in the tensor to the domain of the
# given elementwise unary operator
def _filter_unary_elementwise_tensor(a, *, op):
    # short-circuits for boolean tensors
    if a.dtype is torch.bool:
        return a

    lo, hi = op.domain
    fp_like = a.dtype.is_floating_point or a.dtype.is_complex
    if fp_like and lo is not None:
        lo = lo + op._domain_eps
    if fp_like and hi is not None:
        hi = hi - op._domain_eps

    if a.dtype is torch.uint8 and lo is not None:
        lo = max(lo, 0)

    if not fp_like:
        # integral dtypes: snap the domain bounds to whole numbers
        lo = math.ceil(lo) if lo is not None else None
        hi = math.floor(hi) if hi is not None else None

    if op.reference_numerics_filter is not None:
        condition, safe_value = op.reference_numerics_filter
        _replace_values_in_tensor(a, condition, safe_value)

    # Clamp into the (possibly adjusted) domain, in place
    if lo is not None or hi is not None:
        if a.dtype.is_complex:
            a.real.clamp_(lo, hi)
            a.imag.clamp_(lo, hi)
        else:
            a.clamp_(min=lo, max=hi)

    return a
bool + if dtype is torch.bool: + tensors = ( + torch.empty(0, device=device, dtype=torch.bool), + torch.tensor(True, device=device), + torch.tensor(False, device=device), + torch.tensor((True, False), device=device), + make_tensor((812,), device=device, dtype=dtype), + make_tensor((1029, 917), device=device, dtype=dtype), + ) + for a in tensors: + yield SampleInput(a, kwargs=op.sample_kwargs(device, dtype, a)[0]) + + shapes = ( + (1029, 917), + (812,), + # Empty sizes + (0,), + (0, 3, 3), + (1, 0, 5), + (6, 0, 0, 0), + (3, 0, 1, 0), + ) + + make_arg = partial( + _make_unary_elementwise_tensor, + op=op, + device=device, + dtype=dtype, + requires_grad=requires_grad, + ) + for shape in shapes: + a = make_arg(shape) + yield SampleInput(a, kwargs=op.sample_kwargs(device, dtype, a)[0]) + + +def generate_elementwise_unary_small_value_tensors( + op, *, device, dtype, requires_grad=False +): + for sample in generate_elementwise_binary_small_value_tensors( + op, device=device, dtype=dtype, requires_grad=requires_grad + ): + a = _filter_unary_elementwise_tensor(sample.input, op=op) + yield SampleInput(a, kwargs=op.sample_kwargs(device, dtype, a)[0]) + + +def generate_elementwise_unary_large_value_tensors( + op, *, device, dtype, requires_grad=False +): + for sample in generate_elementwise_binary_large_value_tensors( + op, device=device, dtype=dtype, requires_grad=requires_grad + ): + a = _filter_unary_elementwise_tensor(sample.input, op=op) + yield SampleInput(sample.input, kwargs=op.sample_kwargs(device, dtype, a)[0]) + + +def generate_elementwise_unary_extremal_value_tensors( + op, *, device, dtype, requires_grad=False +): + for sample in generate_elementwise_binary_extremal_value_tensors( + op, device=device, dtype=dtype, requires_grad=requires_grad + ): + yield SampleInput( + sample.input, kwargs=op.sample_kwargs(device, dtype, sample.input)[0] + ) + + +def generate_elementwise_unary_noncontiguous_tensors( + op, *, device, dtype, requires_grad=False +): + make_arg = 
partial( + _make_unary_elementwise_tensor, + op=op, + device=device, + dtype=dtype, + requires_grad=requires_grad, + ) + + # Generic noncontiguity + t = make_arg((1026,), noncontiguous=True) + yield SampleInput(t, kwargs=op.sample_kwargs(device, dtype, t)[0]) + + # Transposed + t = make_arg((1024, 1024)).T + yield SampleInput(t, kwargs=op.sample_kwargs(device, dtype, t)[0]) + + # Expanded tensors + shapes = ((1, 3), (1, 7), (5, 7)) + + for shape in shapes: + t = make_arg(shape) + t_non_contig = t.expand(3, -1, -1) + yield SampleInput( + t_non_contig, kwargs=op.sample_kwargs(device, dtype, t_non_contig)[0] + ) + + +def generate_elementwise_unary_arbitrarily_strided_tensors( + op, *, device, dtype, requires_grad=False +): + # shape, strides, offset + strided_cases = ( + ((5, 6, 2), (1, 1, 7), 2), + ((5, 5, 4), (1, 1, 7), 2), + ((5, 5, 2), (4, 5, 7), 3), + ((5, 5, 2), (5, 5, 7), 3), + ((5, 5, 2), (5, 5, 5), 3), + ((9, 5, 2), (0, 1, 7), 3), + ) + + make_arg = partial( + make_tensor, device=device, dtype=dtype, requires_grad=requires_grad + ) + for shape, strides, offset in strided_cases: + a = make_arg( + 500, + ).as_strided(shape, strides, offset) + yield SampleInput(a, kwargs=op.sample_kwargs(device, dtype, a)[0]) + + +# Reuses the elementwise binary generators for consistency +# TODO: in the future generalize the reference generators to handle n-ary elementwise operations +def _reference_inputs_elementwise_unary(op, device, dtype, requires_grad, **kwargs): + yield from op.sample_inputs_func(op, device, dtype, requires_grad, **kwargs) + + yield from generate_elementwise_unary_tensors( + op, device=device, dtype=dtype, requires_grad=requires_grad, **kwargs + ) + + if dtype is not torch.bool: + yield from generate_elementwise_unary_small_value_tensors( + op, device=device, dtype=dtype, requires_grad=requires_grad, **kwargs + ) + if dtype not in (torch.bool, torch.uint8, torch.int8) and ( + op.handles_large_floats + or (not dtype.is_floating_point and not 
dtype.is_complex) + ): + yield from generate_elementwise_unary_large_value_tensors( + op, device=device, dtype=dtype, requires_grad=requires_grad, **kwargs + ) + + if dtype.is_floating_point or ( + op.handles_complex_extremal_values and dtype.is_complex + ): + yield from generate_elementwise_unary_extremal_value_tensors( + op, device=device, dtype=dtype, requires_grad=requires_grad, **kwargs + ) + + +def reference_inputs_elementwise_unary(op, device, dtype, requires_grad, **kwargs): + gen = partial( + _reference_inputs_elementwise_unary, op, device, dtype, requires_grad, **kwargs + ) + + # yields "normal" samples + yield from gen() + + # yields noncontiguous samples + for sample in gen(): + yield sample.noncontiguous() + + yield from generate_elementwise_unary_noncontiguous_tensors( + op, device=device, dtype=dtype, requires_grad=requires_grad, **kwargs + ) + + yield from generate_elementwise_unary_arbitrarily_strided_tensors( + op, device=device, dtype=dtype, requires_grad=requires_grad, **kwargs + ) + + +# Metadata class for unary "universal functions (ufuncs)" that accept a single +# tensor and have common properties like: +class UnaryUfuncInfo(OpInfo): + """Operator information for 'universal unary functions (unary ufuncs).' + These are functions of a single tensor with common properties like: + - they are elementwise functions + - the input shape is the output shape + - they typically have method and inplace variants + - they typically support the out kwarg + - they typically have NumPy or SciPy references + See NumPy's universal function documentation + (https://numpy.org/doc/1.18/reference/ufuncs.html) for more details + about the concept of ufuncs. 
+ """ + + def __init__( + self, + name, # the string name of the function + *, + dtypes=floating_types(), + domain=(None, None), # the [low, high) domain of the function + handles_complex_extremal_values=True, # whether the op correctly handles extremal values (like nan/inf) + handles_large_floats=True, # whether the op correctly handles large float values (like 1e20) + supports_complex_to_float=False, # op supports casting from complex input to real output safely eg. angle + sample_inputs_func=sample_inputs_elementwise_unary, + reference_inputs_func=reference_inputs_elementwise_unary, + sample_kwargs=lambda device, dtype, input: ({}, {}), + reference_numerics_filter=None, # Filters values in the range of the domain specified above but that should not be tested + **kwargs, + ): + self._original_unary_ufunc_args = locals().copy() + + super().__init__( + name, + dtypes=dtypes, + sample_inputs_func=sample_inputs_func, + reference_inputs_func=reference_inputs_func, + **kwargs, + ) + self.domain = domain + self.handles_complex_extremal_values = handles_complex_extremal_values + self.handles_large_floats = handles_large_floats + self.supports_complex_to_float = supports_complex_to_float + self.reference_numerics_filter = reference_numerics_filter + + # test_unary_ufuncs.py generates its own inputs to test the consistency + # of the operator on sliced tensors, non-contig tensors, etc. + # `sample_kwargs` is a utility function to provide kwargs + # along with those inputs if required (eg. clamp). + # It should return two dictionaries, first holding kwarg for + # torch operator and second one for reference NumPy operator. + self.sample_kwargs = sample_kwargs + + # Epsilon to ensure grad and gradgrad checks don't test values + # outside a function's domain. 
+ self._domain_eps = 1e-5 + + +def sample_inputs_spectral_ops(self, device, dtype, requires_grad=False, **kwargs): + is_fp16_or_chalf = dtype == torch.complex32 or dtype == torch.half + if not is_fp16_or_chalf: + nd_tensor = partial( + make_tensor, + (S, S + 1, S + 2), + device=device, + dtype=dtype, + requires_grad=requires_grad, + ) + oned_tensor = partial( + make_tensor, (31,), device=device, dtype=dtype, requires_grad=requires_grad + ) + else: + # cuFFT supports powers of 2 for half and complex half precision + # NOTE: For hfft, hfft2, hfftn, irfft, irfft2, irfftn with default args + # where output_size n=2*(input_size - 1), we make sure that logical fft size is a power of two + low = None + high = None + if self.name in ["fft.hfft", "fft.irfft", "_refs.fft.hfft", "_refs.fft.irfft"]: + shapes = ((2, 9, 9), (33,)) + elif self.name in [ + "fft.hfft2", + "fft.irfft2", + "_refs.fft.hfft2", + "_refs.fft.irfft2", + ]: + shapes = ((2, 8, 9), (33,)) + elif self.name in [ + "fft.hfftn", + "fft.irfftn", + "_refs.fft.hfftn", + "_refs.fft.irfftn", + ]: + shapes = ((2, 2, 33), (33,)) + # Adjusting the limits because the test would be flaky due to over-saturation of float16 + # See: https://github.com/pytorch/pytorch/pull/81416 + low = -1.0 + high = 1.0 + else: + shapes = ((2, 8, 16), (32,)) + nd_tensor = partial( + make_tensor, + shapes[0], + device=device, + low=low, + high=high, + dtype=dtype, + requires_grad=requires_grad, + ) + oned_tensor = partial( + make_tensor, + shapes[1], + device=device, + low=low, + high=high, + dtype=dtype, + requires_grad=requires_grad, + ) + + if self.ndimensional == SpectralFuncType.ND: + yield SampleInput( + nd_tensor(), + s=(3, 10) if not is_fp16_or_chalf else (4, 8), + dim=(1, 2), + norm="ortho", + ) + yield SampleInput(nd_tensor(), norm="ortho") + yield SampleInput(nd_tensor(), s=(8,)) + yield SampleInput(oned_tensor()) + yield from (SampleInput(nd_tensor(), dim=dim) for dim in [-1, -2, -3, (0, -1)]) + elif self.ndimensional == 
SpectralFuncType.TwoD: + yield SampleInput( + nd_tensor(), + s=(3, 10) if not is_fp16_or_chalf else (4, 8), + dim=(1, 2), + norm="ortho", + ) + yield SampleInput(nd_tensor(), norm="ortho") + yield SampleInput(nd_tensor(), s=(6, 8) if not is_fp16_or_chalf else (4, 8)) + yield SampleInput(nd_tensor(), dim=0) + yield SampleInput(nd_tensor(), dim=(0, -1)) + yield SampleInput(nd_tensor(), dim=(-3, -2, -1)) + else: + yield SampleInput( + nd_tensor(), + n=10 if not is_fp16_or_chalf else 8, + dim=1, + norm="ortho", + ) + yield SampleInput(nd_tensor(), norm="ortho") + yield SampleInput(nd_tensor(), n=7 if not is_fp16_or_chalf else 8) + yield SampleInput(oned_tensor()) + yield from (SampleInput(nd_tensor(), dim=dim) for dim in [-1, -2, -3]) + + +SpectralFuncType = Enum("SpectralFuncType", ("OneD", "TwoD", "ND")) + + +# Metadata class for Fast Fourier Transforms in torch.fft. +class SpectralFuncInfo(OpInfo): + """Operator information for torch.fft transforms.""" + + def __init__( + self, + name, # the string name of the function + *, + ref=None, # Reference implementation (probably in np.fft namespace) + dtypes=floating_and_complex_types(), + ndimensional: SpectralFuncType, + sample_inputs_func=sample_inputs_spectral_ops, + decorators=None, + **kwargs, + ): + self._original_spectral_func_args = dict(locals()).copy() + self._original_spectral_func_args.update(kwargs) + + decorators = list(decorators) if decorators is not None else [] + decorators += [ + skipCPUIfNoFFT, + DecorateInfo( + toleranceOverride({torch.chalf: tol(4e-2, 4e-2)}), + "TestCommon", + "test_complex_half_reference_testing", + ), + ] + + super().__init__( + name=name, + dtypes=dtypes, + decorators=decorators, + sample_inputs_func=sample_inputs_func, + **kwargs, + ) + self.ref = ref + self.ndimensional = ndimensional + + +class ShapeFuncInfo(OpInfo): + """Early version of a specialized OpInfo for Shape manipulating operations like tile and roll""" + + def __init__( + self, + name, # the string name of the 
function + *, + ref, # a reference function + dtypes=floating_types(), + dtypesIfCUDA=None, + dtypesIfROCM=None, + dtypesIfXPU=None, + sample_inputs_func=None, + **kwargs, + ): + super().__init__( + name, + dtypes=dtypes, + dtypesIfCUDA=dtypesIfCUDA, + dtypesIfROCM=dtypesIfROCM, + dtypesIfXPU=dtypesIfXPU, + sample_inputs_func=sample_inputs_func, + **kwargs, + ) + self.ref = ref + + +def sample_inputs_foreach( + self, + device, + dtype, + N, + *, + noncontiguous=False, + same_size=False, + low=None, + high=None, + zero_size: bool, + requires_grad: bool, + # mutually exclusive from same_size and zero_size, which are all or nothing + intersperse_empty_tensors: bool = False, +): + if zero_size: + return [torch.empty(0, dtype=dtype, device=device) for _ in range(N)] + if same_size: + return [ + make_tensor( + (N, N), + dtype=dtype, + device=device, + noncontiguous=noncontiguous, + low=low, + high=high, + requires_grad=requires_grad, + ) + for _ in range(N) + ] + else: + # interweave some empty tensors + have the last 2 tensors be empty (see #100701) + return [ + torch.empty(0, dtype=dtype, device=device, requires_grad=requires_grad) + if (i % 3 == 0 or i >= N - 2) and intersperse_empty_tensors + else make_tensor( + (N - i, N - i), + dtype=dtype, + device=device, + noncontiguous=noncontiguous, + low=low, + high=high, + requires_grad=requires_grad, + ) + for i in range(N) + ] + + +def get_foreach_method_names(name): + # get torch inplace reference function + op_name = "_foreach_" + name + inplace_op_name = op_name + "_" + + op = getattr(torch, op_name, None) + inplace_op = getattr(torch, inplace_op_name, None) + + ref = getattr(torch, name, None) + ref_inplace = getattr(torch.Tensor, name + "_", None) + return op, inplace_op, ref, ref_inplace + + +@dataclass +class ForeachFuncInfo(OpInfo): + """Early version of a specialized OpInfo for foreach functions + + The main differences from the parent class are (a) `dtypes`, `dtypesIfCUDA`, and `dtypesIfROCM` + are set to 
`get_all_dtypes(include_qint=False)`, and (b) the following arguments. + + ``supports_alpha_param=True`` means that the function supports a python scalar (``numbers.Number``) + as the last keyword argument such as `_foreach_add`. + ``supports_scalar_self_arg=True`` means that the function can take a python scalar as its first argument. + Currently only `_foreach_pow` supports this. + ``backward_requires_result=True``, which could sound self-explanatory, means that the function uses + the forward result for its backward computation. + """ + + supports_alpha_param: bool = False + supports_scalar_self_arg: bool = False + backward_requires_result: bool = False + + def __post_init__(self): + ( + foreach_method, + foreach_method_inplace, + torch_ref_method, + torch_ref_inplace, + ) = get_foreach_method_names(self.name) + if not self.supports_out: + # note(crcrpar): `foreach_method` for `"zero"` is `None` but `None` would call + # `_getattr_qual` in `OpInfo.__post_init__` which should fail since `_foreach_zero` + # is not defined at the moment. Thus to skip the qualification, set a similar torch + # function. 
+ assert foreach_method is None + assert torch_ref_method is None + foreach_method = foreach_method_inplace + torch_ref_method = torch_ref_inplace + + self.dtypes = _dispatch_dtypes(get_all_dtypes(include_qint=False)) + + self.op = foreach_method + self.method_variant = foreach_method + self.ref = torch_ref_method + self.inplace_variant = foreach_method_inplace + self.ref_inplace = torch_ref_inplace + self.has_no_in_place = self.inplace_variant is None + + name = self.name + self.name = f"_foreach_{name}" + if name == "norm": + self.ref = torch.linalg.vector_norm + elif name == "minimum": + # because minimum ref does not support inplace or scalar + self.ref = torch.clamp_max + self.ref_inplace = torch.Tensor.clamp_max_ + elif name == "maximum": + # because maximum ref does not support inplace or scalar + self.ref = torch.clamp_min + self.ref_inplace = torch.Tensor.clamp_min_ + + # The following sets `dtypesIfCUDA` and `dtypesIfROCM` accordingly. + super().__post_init__() + + def sample_zero_size_inputs(self, device, dtype, requires_grad=False, **kwargs): + if not hasattr(self.sample_inputs_func, "sample_zero_size_tensor_inputs"): + return [] + return self.sample_inputs_func.sample_zero_size_tensor_inputs( + self, device, dtype, requires_grad, **kwargs + ) + + +def gradcheck_wrapper_hermitian_input(op, input, *args, **kwargs): + """Gradcheck wrapper for functions that take Hermitian matrices as input. + + They require a modified function because the finite-difference algorithm + for calculating derivatives does not preserve the Hermitian property of the input. + """ + return op(input + input.mH, *args, **kwargs) + + +def gradcheck_wrapper_triangular_input(op, *args, upper=False, idx=0, **kwargs): + """Gradcheck wrapper for functions that take lower or upper triangular matrices as input. + + They require a modified function because the finite-difference algorithm + for calculating derivatives does not preserve the triangular property of the input. 
+ `idx` is used to specific which `args[idx]` is to be triangularized. + """ + triangular_arg = args[idx].triu() if upper else args[idx].tril() + return op(*args[:idx], triangular_arg, *args[idx + 1 :], upper, **kwargs) + + +def gradcheck_wrapper_triangular_input_real_positive_diagonal( + op, *args, upper=False, idx=0, **kwargs +): + """Gradcheck wrapper for functions that take lower/upper triangular matrices + with real and positive diagonals, for example, cholesky-like operations. + """ + arg = args[idx] + arg_diag = arg.diagonal(0, -2, -1) + arg_diag_embed = torch.diag_embed(arg_diag) + id_diag_tensor = torch.ones_like(arg_diag) + id_tensor = torch.diag_embed(id_diag_tensor) + # new_arg = arg - diag(arg) + I + new_arg = arg - arg_diag_embed + id_tensor + return gradcheck_wrapper_triangular_input( + op, *args[:idx], new_arg, *args[idx + 1 :], upper=upper, idx=idx, **kwargs + ) + + +def gradcheck_wrapper_masked_operation(op, input, *args, **kwargs): + """Gradcheck wrapper for masked operations. + + When mask is specified, replaces masked-out elements with zeros. + + Use for operations that produce non-finite masked-out elements, + for instance, for minimum and maximum reductions. + """ + output = op(input, *args, **kwargs) + mask = kwargs.get("mask") + if mask is not None: + output_mask = torch.masked._output_mask(op, input, *args, **kwargs) + output = torch.where(output_mask, output, output.new_zeros([])) + return output + + +def gradcheck_wrapper_masked_pointwise_operation(op, input, *args, **kwargs): + """Gradcheck wrapper for masked pointwise operations. Assumes that the result + will be masked iff both tensors are masked at a specific index + + When mask is specified, replaces masked-out elements with zeros. + + Use for operations that produce non-finite masked-out elements, + for instance, for minimum and maximum reductions. 
+ """ + output = op(input, *args, **kwargs) + input_mask = kwargs.get("input_mask") + other_mask = kwargs.get("other_mask") + if input_mask is not None and other_mask is not None: + combined_mask = torch.logical_and(input_mask, other_mask) + new_kwargs = dict(mask=combined_mask, **kwargs) + output_mask = torch.masked._input_mask(input, *args, **new_kwargs) + output = torch.where(output_mask, output, output.new_zeros([])) + return output + + +def clone_sample(sample, **kwargs): + """ + Given a SampleInput, this function analyzes its input, args and kwargs, + and produces a copy with each non-Tensor entry being copied by reference, + and with each Tensor entry cloned with `t.clone().requires_grad_(t.requires_grad)` + """ + + def clone_tensor(t): + if isinstance(t, torch.Tensor): + return t.detach().clone().requires_grad_(t.requires_grad) + else: + return t + + sample_kwargs = kwargs if kwargs else sample.kwargs + + return SampleInput( + clone_tensor(sample.input), + args=tuple(map(clone_tensor, sample.args)), + kwargs={k: clone_tensor(v) for k, v in sample_kwargs.items()}, + ) diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__init__.py b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4820a3eae23293c31ff45ed4260870171b532cd4 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__init__.py @@ -0,0 +1,28 @@ +# mypy: ignore-errors + +from typing import List + +from torch.testing._internal.opinfo.core import OpInfo +from torch.testing._internal.opinfo.definitions import ( + _masked, + fft, + linalg, + signal, + special, +) + + +# Operator database +op_db: List[OpInfo] = [ + *fft.op_db, + *linalg.op_db, + *signal.op_db, + *special.op_db, + *_masked.op_db, +] + +python_ref_db: List[OpInfo] = [ + *fft.python_ref_db, + *linalg.python_ref_db, + 
*special.python_ref_db, +] diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/__init__.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d0bef2ac58fe8380f68b89d6f4fbc7a72b5b899f Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/__init__.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/_masked.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/_masked.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..914ecad47772e1109607607a1df93c45d423f078 Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/_masked.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/fft.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/fft.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..92435a8a95d41edbdf6215a70e455e979cb3b635 Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/fft.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/linalg.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/linalg.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..08b4747ffa869b7024ccdc624b99dd321c936e13 Binary files /dev/null and 
b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/linalg.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/nested.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/nested.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d86170e312d8e9e32d932b9f0fe05d3993075254 Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/nested.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/signal.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/signal.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..210d60f5a0d39cb21226431b8e26194324973f48 Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/signal.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/sparse.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/sparse.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e1d1441913fb6687f62d37e4dd8d573855f4f8e3 Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/sparse.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/special.cpython-310.pyc b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/special.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..a07209f542a50563babae764307613677ab6000c Binary files /dev/null and b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/__pycache__/special.cpython-310.pyc differ diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/_masked.py b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/_masked.py new file mode 100644 index 0000000000000000000000000000000000000000..eda339ebfe68a60a08162a8a1171371fdce72e84 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/_masked.py @@ -0,0 +1,1228 @@ +# mypy: ignore-errors + +import unittest +from collections.abc import Sequence +from functools import partial +from typing import List + +import numpy as np + +import torch +from torch.testing import make_tensor +from torch.testing._internal.common_device_type import tol, toleranceOverride +from torch.testing._internal.common_dtype import ( + all_types_and, + all_types_and_complex_and, + complex_types, + floating_and_complex_types_and, + floating_types_and, + integral_types, +) +from torch.testing._internal.opinfo.core import ( + DecorateInfo, + gradcheck_wrapper_masked_operation, + gradcheck_wrapper_masked_pointwise_operation, + M, + OpInfo, + ReductionOpInfo, + S, + sample_inputs_reduction, + SampleInput, +) +from torch.testing._internal.opinfo.utils import prod_numpy, reference_reduction_numpy + + +# Used for log_softmax, softmax, softmin +def sample_inputs_softmax_variant( + op_info, + device, + dtype, + requires_grad, + with_dtype=False, + use_zero_dimensions=True, + **kwargs, +): + make_arg = partial( + make_tensor, device=device, dtype=dtype, requires_grad=requires_grad + ) + cases = [ + ((S,), (0,)), + ((S, S), (0,)), + ((S, S), (1,)), + ((S, S), (-1,)), + ((S, M, S), (2,)), + *([((S, 0, 0), (-1,))] if use_zero_dimensions else []), + ] + kwargs = dict(dtype=torch.float64) if 
with_dtype else None + + # PyTorch on XLA throws an error when passed with dim argument for 0d tensor. + # See https://github.com/pytorch/xla/issues/3061 for more details. + if torch.device(device).type != "xla": + cases.append(((), (0,))) + + return ( + SampleInput(make_arg(shape), args=dim, kwargs=kwargs) for shape, dim in cases + ) + + +def _generate_masked_op_mask(input_shape, device, **kwargs): + make_arg = partial( + make_tensor, dtype=torch.bool, device=device, requires_grad=False + ) + yield None + yield make_arg(input_shape) + if len(input_shape) > 2: + # broadcast last mask dimension: + yield make_arg(input_shape[:-1] + (1,)) + # broadcast middle mask dimension: + yield make_arg(input_shape[:1] + (1,) + input_shape[2:]) + # broadcast first mask dimension: + yield make_arg((1,) + input_shape[1:]) + # mask.ndim < input.ndim + yield make_arg(input_shape[1:]) + # mask.ndim == 1 + yield make_arg(input_shape[-1:]) + # masks that require broadcasting of inputs (mask.ndim > + # input.ndim) will not be supported, however, we may + # reconsider this if there will be demand on this kind of + # degenerate cases. + + +def sample_inputs_masked_reduction(op_info, device, dtype, requires_grad, **kwargs): + """Sample inputs for masked reduction operators. + + Masked reduction operator is a reduction operator with trailing + mask optional argument. A mask is a bool tensor with the same + shape as input or a shape that is broadcastable to input shape. 
+ """ + kwargs["supports_multiple_dims"] = op_info.supports_multiple_dims + + for sample_input in sample_inputs_reduction( + op_info, device, dtype, requires_grad, **kwargs + ): + for mask in _generate_masked_op_mask( + sample_input.input.shape, device, **kwargs + ): + sample_input_args, sample_input_kwargs = sample_input.args, dict( + mask=mask, **sample_input.kwargs + ) + yield SampleInput( + sample_input.input.detach().requires_grad_(requires_grad), + args=sample_input_args, + kwargs=sample_input_kwargs, + ) + if ( + not requires_grad + and dtype.is_floating_point + and sample_input.input.ndim == 2 + and mask is not None + and mask.shape == sample_input.input.shape + ): + for v in [torch.inf, -torch.inf, torch.nan]: + t = sample_input.input.detach() + t.diagonal(0, -2, -1).fill_(v) + yield SampleInput( + t.requires_grad_(requires_grad), + args=sample_input_args, + kwargs=sample_input_kwargs, + ) + + +def sample_inputs_sparse_coo_masked_reduction( + op_info, device, dtype, requires_grad, **kwargs +): + """Sample inputs for masked reduction operators that support inputs + with sparse coo layouts. + """ + if op_info.supports_sparse: + op_name = op_info.name.replace("masked.", "") + for sample_input in sample_inputs_masked_reduction( + op_info, device, dtype, requires_grad, **kwargs + ): + mask = sample_input.kwargs.get("mask") + if mask is not None: + sample_input_kwargs = sample_input.kwargs.copy() + sample_input_kwargs.update(mask=mask.to_sparse()) + yield SampleInput( + sample_input.input.to_sparse(), + args=sample_input.args, + kwargs=sample_input_kwargs, + ) + else: + if op_name in {"prod", "amax", "amin"}: + # FIXME: for now reductions with non-zero reduction identity and + # unspecified mask are not supported for sparse COO + # tensors, see torch.masked.prod implementation + # for details. 
+ continue + yield SampleInput( + sample_input.input.to_sparse(), + args=sample_input.args, + kwargs=sample_input.kwargs, + ) + + +def sample_inputs_sparse_csr_masked_reduction( + op_info, device, dtype, requires_grad, **kwargs +): + """Sample inputs for masked reduction operators that support inputs + with sparse csr layouts. + """ + if op_info.supports_sparse_csr: + op_name = op_info.name.replace("masked.", "") + for sample_input in sample_inputs_masked_reduction( + op_info, device, dtype, requires_grad, **kwargs + ): + if not ( + sample_input.input.ndim == 2 and sample_input.kwargs.get("keepdim") + ): + # - sparse CSR tensors are always 2-D tensors + # - masked reduction on CSR tensors are defined only if keepdim is True. + continue + mask = sample_input.kwargs.get("mask") + if mask is not None: + sample_input_kwargs = sample_input.kwargs.copy() + sample_input_kwargs.update(mask=mask.to_sparse_csr()) + new_sample = SampleInput( + sample_input.input.to_sparse_csr(), + args=sample_input.args, + kwargs=sample_input_kwargs, + ) + else: + if op_name in ["prod", "amax", "amin", "mean"]: + # reductions with non-zero reduction identity and + # unspecified mask is not supported for sparse CSR + # tensors, see torch.masked.prod implementation + # for details. + continue + new_sample = SampleInput( + sample_input.input.to_sparse_csr(), + args=sample_input.args, + kwargs=sample_input.kwargs, + ) + yield new_sample + if sample_input.kwargs["dim"] == 0: + # Reductions of CSR tensors use different implementations for + # inner and/or outer dimensions. So, as a minimum of testing CSR + # implementations the following kwargs must be generated: + # dict(dim=0, keepdim=True) + # dict(dim=1, keepdim=True) + # dict(dim=(0, 1), keepdim=True) + # Here we generate the dim=1 case from the dim=0 case. 
+ sample_input_kwargs = new_sample.kwargs.copy() + sample_input_kwargs.update(dim=1) + yield SampleInput( + new_sample.input.clone(), + args=sample_input.args, + kwargs=sample_input_kwargs, + ) + + +def sample_inputs_masked_norm(op_info, device, dtype, requires_grad, **kwargs): + """Sample inputs for masked norm.""" + for ord in [2.0, 1, float("inf"), float("-inf"), 0]: + for sample_input in sample_inputs_masked_reduction( + op_info, device, dtype, requires_grad, **kwargs + ): + sample_input_args, sample_input_kwargs = ( + ord, + ) + sample_input.args, sample_input.kwargs.copy() + yield SampleInput( + sample_input.input.clone().requires_grad_(requires_grad), + args=sample_input_args, + kwargs=sample_input_kwargs, + ) + + +def reference_masked_std_var( + numpy_fn, +): + ref = reference_reduction_numpy(numpy_fn) + + # Translate unbiased or correction arguments into ddof + def func( + input, + dim=None, + unbiased=None, + *, + correction=None, + **kwargs, + ): + ddof = 1 + if unbiased is not None: + ddof = 1 if unbiased else 0 + if correction is not None: + ddof = correction + + if isinstance(dim, Sequence): + dim = tuple(dim) + + return ref(input, dim, ddof=ddof, **kwargs) + + return func + + +def sample_inputs_masked_std_var(op_info, device, dtype, requires_grad, **kwargs): + """Sample inputs for masked std/var.""" + kwargs["supports_multiple_dims"] = op_info.supports_multiple_dims + from torch.testing._internal.common_methods_invocations import sample_inputs_std_var + + def masked_samples(): + for sample_input in sample_inputs_std_var( + op_info, device, dtype, requires_grad, **kwargs + ): + if len(sample_input.args) and isinstance(sample_input.args[0], bool): + continue # masked.{std, var} doesn't support `.var(unbiased)` + + for mask in _generate_masked_op_mask( + sample_input.input.shape, device, **kwargs + ): + sample_input_args, sample_input_kwargs = sample_input.args, dict( + mask=mask, **sample_input.kwargs + ) + yield SampleInput( + 
sample_input.input.detach().requires_grad_(requires_grad), + args=sample_input_args, + kwargs=sample_input_kwargs, + ) + if ( + not requires_grad + and dtype.is_floating_point + and sample_input.input.ndim == 2 + and mask is not None + and mask.shape == sample_input.input.shape + ): + for v in [torch.inf, -torch.inf, torch.nan]: + t = sample_input.input.detach() + t.diagonal(0, -2, -1).fill_(v) + yield SampleInput( + t.requires_grad_(requires_grad), + args=sample_input_args, + kwargs=sample_input_kwargs, + ) + + for sample_input in masked_samples(): + correction = sample_input.kwargs.get("correction") + if correction is None: + correction = int(sample_input.kwargs.get("unbiased", True)) + + dim = sample_input.kwargs.get("dim", None) + + if sample_input.kwargs.get("mask") is None: + orig_count = torch.masked.sum( + torch.ones(sample_input.input.shape, dtype=torch.int64), + dim, + keepdim=True, + ) + else: + inmask = torch.masked._input_mask( + sample_input.input, *sample_input.args, **sample_input.kwargs + ) + orig_count = torch.masked.sum( + inmask.new_ones(sample_input.input.shape, dtype=torch.int64), + dim, + keepdim=True, + mask=inmask, + ) + if orig_count.min() <= correction + 1: + # Skip samples that lead to nans in var computation + continue + + yield sample_input + + +def sample_inputs_masked_softmax( + op_info, device, dtype, requires_grad, with_dtype=False, **kwargs +): + """Sample inputs for masked softmax, log_softmax, and softmin. + + Masked normalization operator is a reduction operator with + trailing mask optional argument. A mask is a bool tensor with the + same shape as input or a shape that is broadcastable to input + shape. 
+ """ + for sample_input in sample_inputs_softmax_variant( + op_info, device, dtype, requires_grad, with_dtype=with_dtype, **kwargs + ): + for mask in _generate_masked_op_mask( + sample_input.input.shape, device, **kwargs + ): + yield SampleInput( + sample_input.input.clone().requires_grad_(requires_grad), + *sample_input.args, + mask=mask, + **sample_input.kwargs, + ) + + +def sample_inputs_masked_cumops(op_info, device, dtype, requires_grad, **kwargs): + """Sample inputs for masked cumsum and cumprod.""" + inputs: List[SampleInput] = [] + for sample_input in sample_inputs_softmax_variant( + op_info, device, dtype, requires_grad, **kwargs + ): + for mask in _generate_masked_op_mask( + sample_input.input.shape, device, **kwargs + ): + if type(mask) != torch.Tensor: + continue + sample_input_args, sample_input_kwargs = sample_input.args, dict( + mask=mask, **sample_input.kwargs + ) + if "keepdim" in sample_input_kwargs: + sample_input_kwargs.pop("keepdim") + # dimension is required + if sample_input_args: + dim = sample_input.args[0] + else: + if "dim" not in sample_input_kwargs: + continue + dim = sample_input_kwargs.pop("dim") + sample_input_args = (dim,) + yield SampleInput( + sample_input.input.clone().requires_grad_(requires_grad), + *sample_input_args, + **sample_input_kwargs, + ) + + +def sample_inputs_masked_logaddexp(op_info, device, dtype, requires_grad, **kwargs): + """Sample inputs for masked logaddexp.""" + shapes = [(S,), (S, S), (S, M, S)] + input_mask_lists = [ + list(_generate_masked_op_mask(shape, device, **kwargs)) for shape in shapes + ] + other_mask_lists = [ + list(_generate_masked_op_mask(shape, device, **kwargs)) for shape in shapes + ] + + make_arg = partial( + make_tensor, dtype=dtype, device=device, requires_grad=requires_grad + ) + for shape, input_masks, other_masks in zip( + shapes, input_mask_lists, other_mask_lists + ): + for input_mask, other_mask in zip(input_masks, other_masks): + yield SampleInput( + make_arg(shape), + 
make_arg(shape), + input_mask=input_mask, + other_mask=other_mask, + ) + + +def sample_inputs_masked_normalize(op_info, device, dtype, requires_grad, **kwargs): + """Sample inputs for masked normalize.""" + for ord in [2.0, 1, float("inf"), float("-inf"), 0]: + for sample_input in sample_inputs_softmax_variant( + op_info, device, dtype, requires_grad, use_zero_dimensions=False, **kwargs + ): + yield SampleInput( + sample_input.input.clone().requires_grad_(requires_grad), + ord, + *sample_input.args, + **sample_input.kwargs, + ) + + +op_db: List[OpInfo] = [ + ReductionOpInfo( + "masked.sum", + ref=reference_reduction_numpy(np.sum), + method_variant=None, + identity=0, + nan_policy="propagate", + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + promotes_int_to_int64=True, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + skips=( + DecorateInfo( + unittest.skip("Failing on some jobs"), + "TestReductions", + "test_reference_masked", + dtypes=(torch.bool, torch.int8, torch.int16, torch.int32), + ), + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + # FIXME: sum reduces all dimensions when dim=[] + DecorateInfo(unittest.expectedFailure, "TestReductions", "test_dim_empty"), + DecorateInfo( + unittest.expectedFailure, "TestReductions", "test_dim_empty_keepdim" + ), + # RuntimeError: undefined value tensor + DecorateInfo( + unittest.expectedFailure, "TestJit", "test_variant_consistency_jit" + ), + ), + decorators=[ + DecorateInfo( + toleranceOverride( + { + torch.bfloat16: tol(atol=1e-03, rtol=5e-2), + torch.float16: tol(atol=1e-03, rtol=5e-3), + } + ), + "TestReductions", + "test_reference_masked", + ), + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-03)}), + "TestReductions", + "test_ref_small_input", + ), + DecorateInfo( + toleranceOverride( + { + torch.bfloat16: 
tol(atol=0.1, rtol=0.1), + torch.float16: tol(atol=5e-3, rtol=5e-3), + } + ), + "TestMasked", + "test_mask_layout", + ), + ], + sample_inputs_func=sample_inputs_masked_reduction, + sample_inputs_sparse_coo_func=sample_inputs_sparse_coo_masked_reduction, + sample_inputs_sparse_csr_func=sample_inputs_sparse_csr_masked_reduction, + ), + ReductionOpInfo( + "masked.prod", + ref=prod_numpy, + method_variant=None, + identity=1, + nan_policy="propagate", + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + promotes_int_to_int64=True, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + skips=( + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + DecorateInfo( + unittest.expectedFailure, "TestJit", "test_variant_consistency_jit" + ), + DecorateInfo( + unittest.skip("Failing on some jobs"), + "TestReductions", + "test_reference_masked", + dtypes=(torch.bool, torch.int8, torch.int16, torch.int32), + ), + DecorateInfo( + "TestReductions", + "test_ref_small_input", + dtypes=(torch.int8, torch.int16, torch.int32), + ), + # FIXME: "cuda_scatter_gather_base_kernel_func" not implemented for ... 
(used for sparse_coo inputs) + DecorateInfo( + unittest.skip("Skipped!"), + "TestMasked", + "test_mask_layout", + device_type="cuda", + dtypes=(torch.bool, *integral_types(), *complex_types()), + ), + ), + decorators=[ + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-03, rtol=1e-02)}), + "TestReductions", + "test_reference_masked", + ), + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-03, rtol=1e-03)}), + "TestReductions", + "test_ref_duplicate_values", + ), + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-03, rtol=1e-03)}), + "TestReductions", + "test_ref_small_input", + ), + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1.5e-03)}), + "TestMasked", + "test_mask_layout", + device_type="cpu", + ), + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1e-05, rtol=1e-05)}), + "TestOperators", + "test_jvp", + device_type="cuda", + ), + ], + sample_inputs_func=sample_inputs_masked_reduction, + sample_inputs_sparse_coo_func=sample_inputs_sparse_coo_masked_reduction, + sample_inputs_sparse_csr_func=sample_inputs_sparse_csr_masked_reduction, + ), + OpInfo( + "masked.cumsum", + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + method_variant=None, + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + # NotSupportedError: Compiled functions can't ... 
use keyword-only arguments with defaults + DecorateInfo( + unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit" + ), + ), + # Can reuse the same inputs; dim is required in both + sample_inputs_func=sample_inputs_masked_cumops, + gradcheck_wrapper=gradcheck_wrapper_masked_operation, + ), + OpInfo( + "masked.cumprod", + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + method_variant=None, + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # NotSupportedError: Compiled functions can't ... use keyword-only arguments with defaults + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + # NotSupportedError: Compiled functions can't ... use keyword-only arguments with defaults + DecorateInfo( + unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit" + ), + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1e-5, rtol=1e-5)}), + "TestCompositeCompliance", + "test_backward", + device_type="cuda", + ), + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-2, rtol=2.6e-3)}), + "TestInductorOpInfo", + "test_comprehensive", + device_type="cuda", + ), + ), + # Can reuse the same inputs; dim is required in both + sample_inputs_func=sample_inputs_masked_cumops, + gradcheck_wrapper=gradcheck_wrapper_masked_operation, + ), + ReductionOpInfo( + "masked.amax", + nan_policy="propagate", + supports_out=False, + dtypes=all_types_and(torch.float16, torch.bfloat16), + supports_sparse=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse_csr=True, + ref=reference_reduction_numpy(np.amax), + skips=( + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + # FIXME: amax reduces all dimensions when dim=[] + 
DecorateInfo(unittest.expectedFailure, "TestReductions", "test_dim_empty"), + DecorateInfo( + unittest.expectedFailure, "TestReductions", "test_dim_empty_keepdim" + ), + # RuntimeError: Unknown builtin op: aten::iinfo + DecorateInfo( + unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit" + ), + # FIXME: "cuda_scatter_gather_base_kernel_func" not implemented for ... (used for sparse_coo inputs) + # FIXME: "_segment_reduce_lengths_cpu/cuda" not implemented for ... (used for sparse_csr inputs) + DecorateInfo( + unittest.skip("Skipped!"), + "TestMasked", + "test_mask_layout", + dtypes=(torch.bool, *integral_types(), *complex_types()), + ), + ), + sample_inputs_func=sample_inputs_masked_reduction, + sample_inputs_sparse_coo_func=sample_inputs_sparse_coo_masked_reduction, + sample_inputs_sparse_csr_func=sample_inputs_sparse_csr_masked_reduction, + gradcheck_wrapper=gradcheck_wrapper_masked_operation, + ), + ReductionOpInfo( + "masked.amin", + nan_policy="propagate", + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + dtypes=all_types_and(torch.float16, torch.bfloat16), + supports_sparse=True, + supports_sparse_csr=True, + ref=reference_reduction_numpy(np.amin), + skips=( + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + # FIXME: amax reduces all dimensions when dim=[] + DecorateInfo(unittest.expectedFailure, "TestReductions", "test_dim_empty"), + DecorateInfo( + unittest.expectedFailure, "TestReductions", "test_dim_empty_keepdim" + ), + # RuntimeError: Unknown builtin op: aten::iinfo + DecorateInfo( + unittest.expectedFailure, "TestJit", "test_variant_consistency_jit" + ), + # FIXME: "cuda_scatter_gather_base_kernel_func" not implemented for ... (used for sparse_coo inputs) + # FIXME: "_segment_reduce_lengths_cpu/cuda" not implemented for ... 
(used for sparse_csr inputs) + DecorateInfo( + unittest.skip("Skipped!"), + "TestMasked", + "test_mask_layout", + dtypes=(torch.bool, *integral_types(), *complex_types()), + ), + ), + sample_inputs_func=sample_inputs_masked_reduction, + sample_inputs_sparse_coo_func=sample_inputs_sparse_coo_masked_reduction, + sample_inputs_sparse_csr_func=sample_inputs_sparse_csr_masked_reduction, + gradcheck_wrapper=gradcheck_wrapper_masked_operation, + ), + ReductionOpInfo( + "masked.argmax", + supports_out=False, + supports_multiple_dims=False, + supports_autograd=False, + dtypes=all_types_and(torch.float16, torch.bfloat16), + ref=reference_reduction_numpy(np.argmax, supports_keepdims=False), + skips=( + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + # initial is not a keyword for argmax + DecorateInfo( + unittest.expectedFailure, "TestReductions", "test_reference_masked" + ), + # NotSupportedError: Compiled functions can't ... use keyword-only arguments with defaults + DecorateInfo( + unittest.expectedFailure, "TestJit", "test_variant_consistency_jit" + ), + ), + sample_inputs_func=sample_inputs_masked_reduction, + gradcheck_wrapper=gradcheck_wrapper_masked_operation, + ), + ReductionOpInfo( + "masked.argmin", + supports_out=False, + supports_multiple_dims=False, + supports_autograd=False, + dtypes=all_types_and(torch.float16, torch.bfloat16), + ref=reference_reduction_numpy(np.argmin, supports_keepdims=False), + skips=( + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + # initial is not a keyword for argmin + DecorateInfo( + unittest.expectedFailure, "TestReductions", "test_reference_masked" + ), + # NotSupportedError: Compiled functions can't ... 
use keyword-only arguments with defaults + DecorateInfo( + unittest.expectedFailure, "TestJit", "test_variant_consistency_jit" + ), + ), + sample_inputs_func=sample_inputs_masked_reduction, + gradcheck_wrapper=gradcheck_wrapper_masked_operation, + ), + ReductionOpInfo( + "masked.mean", + ref=reference_reduction_numpy(np.mean) + if np.lib.NumpyVersion(np.__version__) >= "1.20.2" + else None, + method_variant=None, + nan_policy="propagate", + supports_out=False, + supports_sparse_csr=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16, torch.bool), + skips=( + DecorateInfo( + unittest.expectedFailure, + "TestReductions", + "test_ref_duplicate_values", + dtypes=(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestReductions", + "test_reference_masked", + dtypes=(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestReductions", + "test_ref_small_input", + dtypes=(torch.bool,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + # FIXME: sum reduces all dimensions when dim=[] + DecorateInfo(unittest.expectedFailure, "TestReductions", "test_dim_empty"), + DecorateInfo( + unittest.expectedFailure, "TestReductions", "test_dim_empty_keepdim" + ), + # RuntimeError: undefined value tensor + DecorateInfo( + unittest.expectedFailure, "TestJit", "test_variant_consistency_jit" + ), + # FIXME: "_segment_reduce_lengths_cpu/cuda" not implemented for ... 
(used for sparse_csr inputs) + DecorateInfo( + unittest.skip("Skipped!"), + "TestMasked", + "test_mask_layout", + dtypes=(torch.bool, *integral_types(), *complex_types()), + ), + ), + decorators=[ + DecorateInfo( + toleranceOverride( + { + torch.bfloat16: tol(atol=1e-03, rtol=0.05), + torch.float16: tol(atol=1e-03, rtol=1e-03), + } + ), + "TestReductions", + "test_reference_masked", + ), + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-03, rtol=1e-03)}), + "TestReductions", + "test_ref_small_input", + ), + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-03, rtol=2e-03)}), + "TestSparseCompressed", + "test_consistency", + device_type="cuda", + ), + ], + sample_inputs_func=sample_inputs_masked_reduction, + sample_inputs_sparse_csr_func=sample_inputs_sparse_csr_masked_reduction, + gradcheck_wrapper=gradcheck_wrapper_masked_operation, + ), + OpInfo( + "masked.median", + dtypes=floating_types_and(torch.bfloat16, torch.float16), + method_variant=None, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + # NotSupportedError: Compiled functions can't ... 
use keyword-only arguments with defaults + DecorateInfo( + unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit" + ), + ), + sample_inputs_func=partial( + sample_inputs_masked_softmax, use_zero_dimensions=False + ), + gradcheck_wrapper=gradcheck_wrapper_masked_operation, + ), + ReductionOpInfo( + "masked.norm", + identity=0, + method_variant=None, + nan_policy="propagate", + supports_out=False, + promotes_int_to_float=True, + dtypes=floating_types_and(torch.float16, torch.bfloat16), + skips=( + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + # FIXME: sum reduces all dimensions when dim=[] + DecorateInfo(unittest.expectedFailure, "TestReductions", "test_dim_empty"), + DecorateInfo( + unittest.expectedFailure, "TestReductions", "test_dim_empty_keepdim" + ), + # torch.jit.frontend.NotSupportedError: Compiled functions + # can't take variable number of arguments or use + # keyword-only arguments with defaults + DecorateInfo( + unittest.expectedFailure, "TestJit", "test_variant_consistency_jit" + ), + ), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_masked_norm, + gradcheck_wrapper=gradcheck_wrapper_masked_operation, + ), + ReductionOpInfo( + "masked.var", + ref=reference_masked_std_var(np.var) + if np.lib.NumpyVersion(np.__version__) >= "1.20.2" + else None, + method_variant=None, + nan_policy="propagate", + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + promotes_int_to_float=True, + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + skips=( + # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 + DecorateInfo( + unittest.skip("Skipped!"), + "TestSchemaCheckModeOpInfo", + "test_schema_correctness", + dtypes=(torch.complex64, torch.complex128), + ), + DecorateInfo( + 
unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + # FIXME: sum reduces all dimensions when dim=[] + DecorateInfo(unittest.expectedFailure, "TestReductions", "test_dim_empty"), + DecorateInfo( + unittest.expectedFailure, "TestReductions", "test_dim_empty_keepdim" + ), + # RuntimeError: undefined value tensor + DecorateInfo( + unittest.expectedFailure, "TestJit", "test_variant_consistency_jit" + ), + ), + decorators=[ + DecorateInfo( + toleranceOverride( + { + torch.float16: tol(atol=1e-02, rtol=1e-02), + torch.bfloat16: tol(atol=1e-03, rtol=1e-03), + } + ), + "TestReductions", + "test_reference_masked", + ), + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}), + "TestReductions", + "test_ref_small_input", + ), + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}), + "TestMasked", + "test_reference_masked", + ), + DecorateInfo( + toleranceOverride( + { + torch.float16: tol(atol=1e-02, rtol=1e-02), + torch.bfloat16: tol(atol=1e-03, rtol=1e-03), + } + ), + "TestMasked", + "test_reference_masked", + ), + DecorateInfo( + toleranceOverride( + { + torch.float16: tol(atol=4e-5, rtol=2e-2), + } + ), + "TestInductorOpInfo", + "test_comprehensive", + device_type="cuda", + ), + ], + sample_inputs_func=sample_inputs_masked_std_var, + gradcheck_wrapper=gradcheck_wrapper_masked_operation, + check_batched_grad=True, + ), + ReductionOpInfo( + "masked.std", + ref=reference_masked_std_var(np.std) + if np.lib.NumpyVersion(np.__version__) >= "1.20.2" + else None, + method_variant=None, + nan_policy="propagate", + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + promotes_int_to_float=True, + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), + 
skips=( + # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 + DecorateInfo( + unittest.skip("Skipped!"), + "TestSchemaCheckModeOpInfo", + "test_schema_correctness", + dtypes=(torch.complex64, torch.complex128), + ), + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + # FIXME: sum reduces all dimensions when dim=[] + DecorateInfo(unittest.expectedFailure, "TestReductions", "test_dim_empty"), + DecorateInfo( + unittest.expectedFailure, "TestReductions", "test_dim_empty_keepdim" + ), + # RuntimeError: undefined value tensor + DecorateInfo( + unittest.expectedFailure, "TestJit", "test_variant_consistency_jit" + ), + ), + decorators=[ + DecorateInfo( + toleranceOverride( + { + torch.bfloat16: tol(atol=1e-02, rtol=1e-02), + torch.float16: tol(atol=1e-02, rtol=1e-02), + } + ), + "TestReductions", + "test_reference_masked", + ), + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}), + "TestReductions", + "test_ref_small_input", + ), + DecorateInfo( + toleranceOverride( + { + torch.float16: tol(atol=1e-02, rtol=1e-02), + torch.bfloat16: tol(atol=5e-03, rtol=5e-04), + } + ), + "TestMasked", + "test_reference_masked", + ), + ], + sample_inputs_func=sample_inputs_masked_std_var, + gradcheck_wrapper=gradcheck_wrapper_masked_operation, + check_batched_grad=True, + ), + OpInfo( + "masked.softmax", + method_variant=None, + dtypes=floating_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_masked_softmax, + skips=( + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + DecorateInfo( + unittest.expectedFailure, "TestJit", "test_variant_consistency_jit" + ), + ), + gradcheck_wrapper=gradcheck_wrapper_masked_operation, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + ), + OpInfo( + "masked.log_softmax", + method_variant=None, + 
dtypes=floating_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_masked_softmax, + skips=( + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + DecorateInfo( + unittest.expectedFailure, "TestJit", "test_variant_consistency_jit" + ), + ), + decorators=[ + DecorateInfo( + toleranceOverride({torch.bfloat16: tol(atol=1e-02, rtol=1e-02)}), + "TestMasked", + "test_reference_masked", + ), + ], + gradcheck_wrapper=gradcheck_wrapper_masked_operation, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + ), + OpInfo( + "masked.softmin", + method_variant=None, + dtypes=floating_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_masked_softmax, + skips=( + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + DecorateInfo( + unittest.expectedFailure, "TestJit", "test_variant_consistency_jit" + ), + # FIXME: + # Mismatched elements: 2 / 2 (100.0%) + # Greatest absolute difference: nan at index (0,) (up to 0.0001 allowed) + # Greatest relative difference: nan at index (0,) (up to 0.0001 allowed + DecorateInfo( + unittest.skip("Skipped!"), + "TestOperators", + "test_vmapvjpvjp", + device_type="cpu", + ), + ), + gradcheck_wrapper=gradcheck_wrapper_masked_operation, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + ), + OpInfo( + "masked.normalize", + method_variant=None, + dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_masked_normalize, + decorators=[ + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=2e-5, rtol=6e-3)}), + "TestInductorOpInfo", + "test_comprehensive", + device_type="cuda", + ), + ], + skips=( + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + DecorateInfo( + unittest.expectedFailure, "TestJit", 
"test_variant_consistency_jit" + ), + ), + gradcheck_wrapper=gradcheck_wrapper_masked_operation, + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + ), + OpInfo( + "masked.logaddexp", + dtypes=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_forward_grad=False, + skips=( + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + # NotSupportedError: Compiled functions can't ... use keyword-only arguments with defaults + DecorateInfo( + unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit" + ), + DecorateInfo( + unittest.skip("Skipped!"), "TestFwdGradients", "test_fn_gradgrad" + ), + DecorateInfo( + unittest.skip("Skipped!"), "TestBwdGradients", "test_fn_gradgrad" + ), + ), + sample_inputs_func=sample_inputs_masked_logaddexp, + gradcheck_wrapper=gradcheck_wrapper_masked_pointwise_operation, + ), + ReductionOpInfo( + "masked.logsumexp", + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), + method_variant=None, + nan_policy="propagate", + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + # FIXME: reduces all dimensions when dim=[] + DecorateInfo(unittest.skip("Skipped!"), "TestReductions", "test_dim_empty"), + DecorateInfo( + unittest.skip("Skipped!"), "TestReductions", "test_dim_empty_keepdim" + ), + # Identity can't be -torch.inf without overflow + DecorateInfo( + unittest.skip("Skipped!"), + "TestReductions", + "test_empty_tensor_empty_slice", + ), + # NotSupportedError: Compiled functions can't ... 
use keyword-only arguments with defaults + DecorateInfo( + unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit" + ), + # all the values are the same except for -inf vs nan + DecorateInfo(unittest.skip("Skipped!"), "TestDecomp", "test_comprehensive"), + # FIXME: + # Mismatched elements: 2 / 12 (16.7%) + # Greatest absolute difference: 9223372034707292160 at index (0, 0, 0, 0) + # Greatest relative difference: 0.0 at index (0, 0, 0, 1) + DecorateInfo( + unittest.skip("Skipped!"), + "TestInductorOpInfo", + "test_comprehensive", + device_type="cpu", + ), + ), + sample_inputs_func=sample_inputs_masked_reduction, + gradcheck_wrapper=gradcheck_wrapper_masked_operation, + ), +] diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/fft.py b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/fft.py new file mode 100644 index 0000000000000000000000000000000000000000..6ed395eef0203afc52f7ec9bcc24bd2d7ce5fa18 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/fft.py @@ -0,0 +1,810 @@ +# mypy: ignore-errors + +import unittest +from functools import partial +from typing import List + +import numpy as np + +import torch +from torch.testing import make_tensor +from torch.testing._internal.common_cuda import SM53OrLater +from torch.testing._internal.common_device_type import precisionOverride +from torch.testing._internal.common_dtype import ( + all_types_and, + all_types_and_complex_and, +) +from torch.testing._internal.common_utils import TEST_SCIPY, TEST_WITH_ROCM +from torch.testing._internal.opinfo.core import ( + DecorateInfo, + ErrorInput, + OpInfo, + sample_inputs_spectral_ops, + SampleInput, + SpectralFuncInfo, + SpectralFuncType, +) +from torch.testing._internal.opinfo.refs import ( + _find_referenced_opinfo, + _inherit_constructor_args, + PythonRefInfo, +) + + +has_scipy_fft = False +if TEST_SCIPY: + try: + import scipy.fft + 
+ has_scipy_fft = True + except ModuleNotFoundError: + pass + + +class SpectralFuncPythonRefInfo(SpectralFuncInfo): + """ + An OpInfo for a Python reference of an elementwise unary operation. + """ + + def __init__( + self, + name, # the stringname of the callable Python reference + *, + op=None, # the function variant of the operation, populated as torch. if None + torch_opinfo_name, # the string name of the corresponding torch opinfo + torch_opinfo_variant="", + **kwargs, + ): # additional kwargs override kwargs inherited from the torch opinfo + self.torch_opinfo_name = torch_opinfo_name + self.torch_opinfo = _find_referenced_opinfo( + torch_opinfo_name, torch_opinfo_variant, op_db=op_db + ) + assert isinstance(self.torch_opinfo, SpectralFuncInfo) + + inherited = self.torch_opinfo._original_spectral_func_args + ukwargs = _inherit_constructor_args(name, op, inherited, kwargs) + + super().__init__(**ukwargs) + + +def error_inputs_fft(op_info, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float32) + # Zero-dimensional tensor has no dimension to take FFT of + yield ErrorInput( + SampleInput(make_arg()), + error_type=IndexError, + error_regex="Dimension specified as -1 but tensor has no dimensions", + ) + + +def error_inputs_fftn(op_info, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float32) + # Specifying a dimension on a zero-dimensional tensor + yield ErrorInput( + SampleInput(make_arg(), dim=(0,)), + error_type=IndexError, + error_regex="Dimension specified as 0 but tensor has no dimensions", + ) + + +def sample_inputs_fft_with_min( + op_info, device, dtype, requires_grad=False, *, min_size, **kwargs +): + yield from sample_inputs_spectral_ops( + op_info, device, dtype, requires_grad, **kwargs + ) + if TEST_WITH_ROCM: + # FIXME: Causes floating point exception on ROCm + return + + # Check the "Invalid number of data points" error isn't too strict + # https://github.com/pytorch/pytorch/pull/109083 
+ a = make_tensor(min_size, dtype=dtype, device=device, requires_grad=requires_grad) + yield SampleInput(a) + + +def sample_inputs_fftshift(op_info, device, dtype, requires_grad, **kwargs): + def mt(shape, **kwargs): + return make_tensor( + shape, device=device, dtype=dtype, requires_grad=requires_grad, **kwargs + ) + + yield SampleInput(mt((9, 10))) + yield SampleInput(mt((50,)), kwargs=dict(dim=0)) + yield SampleInput(mt((5, 11)), kwargs=dict(dim=(1,))) + yield SampleInput(mt((5, 6)), kwargs=dict(dim=(0, 1))) + yield SampleInput(mt((5, 6, 2)), kwargs=dict(dim=(0, 2))) + + +# Operator database +op_db: List[OpInfo] = [ + SpectralFuncInfo( + "fft.fft", + aten_name="fft_fft", + decomp_aten_name="_fft_c2c", + ref=np.fft.fft, + ndimensional=SpectralFuncType.OneD, + dtypes=all_types_and_complex_and(torch.bool), + # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs + dtypesIfCUDA=all_types_and_complex_and( + torch.bool, + *(() if (not SM53OrLater) else (torch.half, torch.complex32)), + ), + sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=1), + error_inputs_func=error_inputs_fft, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + ), + SpectralFuncInfo( + "fft.fft2", + aten_name="fft_fft2", + ref=np.fft.fft2, + decomp_aten_name="_fft_c2c", + ndimensional=SpectralFuncType.TwoD, + dtypes=all_types_and_complex_and(torch.bool), + # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs + dtypesIfCUDA=all_types_and_complex_and( + torch.bool, + *(() if (not SM53OrLater) else (torch.half, torch.complex32)), + ), + sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 1)), + error_inputs_func=error_inputs_fftn, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + 
supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + decorators=[precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4})], + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_complex_half_reference_testing", + device_type="cuda", + dtypes=[torch.complex32], + active_if=TEST_WITH_ROCM, + ), + ), + ), + SpectralFuncInfo( + "fft.fftn", + aten_name="fft_fftn", + decomp_aten_name="_fft_c2c", + ref=np.fft.fftn, + ndimensional=SpectralFuncType.ND, + dtypes=all_types_and_complex_and(torch.bool), + # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs + dtypesIfCUDA=all_types_and_complex_and( + torch.bool, + *(() if (not SM53OrLater) else (torch.half, torch.complex32)), + ), + sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 1)), + error_inputs_func=error_inputs_fftn, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + decorators=[precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4})], + ), + SpectralFuncInfo( + "fft.hfft", + aten_name="fft_hfft", + decomp_aten_name="_fft_c2r", + ref=np.fft.hfft, + ndimensional=SpectralFuncType.OneD, + dtypes=all_types_and_complex_and(torch.bool), + # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs + dtypesIfCUDA=all_types_and_complex_and( + torch.bool, + *(() if (not SM53OrLater) else (torch.half, torch.complex32)), + ), + sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=2), + error_inputs_func=error_inputs_fft, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + check_batched_gradgrad=False, + skips=( + # 
Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 + DecorateInfo( + unittest.skip("Skipped!"), + "TestSchemaCheckModeOpInfo", + "test_schema_correctness", + dtypes=(torch.complex64, torch.complex128), + ), + ), + ), + SpectralFuncInfo( + "fft.hfft2", + aten_name="fft_hfft2", + decomp_aten_name="_fft_c2r", + ref=scipy.fft.hfft2 if has_scipy_fft else None, + ndimensional=SpectralFuncType.TwoD, + dtypes=all_types_and_complex_and(torch.bool), + # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs + dtypesIfCUDA=all_types_and_complex_and( + torch.bool, + *(() if (not SM53OrLater) else (torch.half, torch.complex32)), + ), + sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(2, 2)), + error_inputs_func=error_inputs_fftn, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_gradgrad=False, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + decorators=[ + DecorateInfo( + precisionOverride({torch.float: 2e-4, torch.cfloat: 2e-4}), + "TestFFT", + "test_reference_nd", + ), + ], + skips=( + # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 + DecorateInfo( + unittest.skip("Skipped!"), + "TestSchemaCheckModeOpInfo", + "test_schema_correctness", + ), + # FIXME: errors are too large; needs investigation + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_complex_half_reference_testing", + device_type="cuda", + ), + ), + ), + SpectralFuncInfo( + "fft.hfftn", + aten_name="fft_hfftn", + decomp_aten_name="_fft_c2r", + ref=scipy.fft.hfftn if has_scipy_fft else None, + ndimensional=SpectralFuncType.ND, + dtypes=all_types_and_complex_and(torch.bool), + # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs + dtypesIfCUDA=all_types_and_complex_and( + torch.bool, + *(() if (not SM53OrLater) else 
(torch.half, torch.complex32)), + ), + sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(2, 2)), + error_inputs_func=error_inputs_fftn, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_gradgrad=False, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + decorators=[ + DecorateInfo( + precisionOverride({torch.float: 2e-4, torch.cfloat: 2e-4}), + "TestFFT", + "test_reference_nd", + ), + ], + skips=( + # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 + DecorateInfo( + unittest.skip("Skipped!"), + "TestSchemaCheckModeOpInfo", + "test_schema_correctness", + ), + ), + ), + SpectralFuncInfo( + "fft.rfft", + aten_name="fft_rfft", + decomp_aten_name="_fft_r2c", + ref=np.fft.rfft, + ndimensional=SpectralFuncType.OneD, + dtypes=all_types_and(torch.bool), + # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs + dtypesIfCUDA=all_types_and( + torch.bool, *(() if (not SM53OrLater) else (torch.half,)) + ), + sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=1), + error_inputs_func=error_inputs_fft, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_grad=False, + skips=(), + check_batched_gradgrad=False, + ), + SpectralFuncInfo( + "fft.rfft2", + aten_name="fft_rfft2", + decomp_aten_name="_fft_r2c", + ref=np.fft.rfft2, + ndimensional=SpectralFuncType.TwoD, + dtypes=all_types_and(torch.bool), + # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs + dtypesIfCUDA=all_types_and( + torch.bool, *(() if (not SM53OrLater) else (torch.half,)) + ), + sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 1)), + error_inputs_func=error_inputs_fftn, + # https://github.com/pytorch/pytorch/issues/80411 + 
gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_grad=False, + check_batched_gradgrad=False, + decorators=[ + precisionOverride({torch.float: 1e-4}), + ], + ), + SpectralFuncInfo( + "fft.rfftn", + aten_name="fft_rfftn", + decomp_aten_name="_fft_r2c", + ref=np.fft.rfftn, + ndimensional=SpectralFuncType.ND, + dtypes=all_types_and(torch.bool), + # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs + dtypesIfCUDA=all_types_and( + torch.bool, *(() if (not SM53OrLater) else (torch.half,)) + ), + sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 1)), + error_inputs_func=error_inputs_fftn, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_grad=False, + check_batched_gradgrad=False, + decorators=[ + precisionOverride({torch.float: 1e-4}), + ], + ), + SpectralFuncInfo( + "fft.ifft", + aten_name="fft_ifft", + decomp_aten_name="_fft_c2c", + ref=np.fft.ifft, + ndimensional=SpectralFuncType.OneD, + sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=1), + error_inputs_func=error_inputs_fft, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + dtypes=all_types_and_complex_and(torch.bool), + # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs + dtypesIfCUDA=all_types_and_complex_and( + torch.bool, + *(() if (not SM53OrLater) else (torch.half, torch.complex32)), + ), + ), + SpectralFuncInfo( + "fft.ifft2", + aten_name="fft_ifft2", + decomp_aten_name="_fft_c2c", + ref=np.fft.ifft2, + ndimensional=SpectralFuncType.TwoD, + sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 1)), + error_inputs_func=error_inputs_fftn, + # 
https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + dtypes=all_types_and_complex_and(torch.bool), + # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs + dtypesIfCUDA=all_types_and_complex_and( + torch.bool, + *(() if (not SM53OrLater) else (torch.half, torch.complex32)), + ), + decorators=[ + DecorateInfo( + precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}), + "TestFFT", + "test_reference_nd", + ) + ], + ), + SpectralFuncInfo( + "fft.ifftn", + aten_name="fft_ifftn", + decomp_aten_name="_fft_c2c", + ref=np.fft.ifftn, + ndimensional=SpectralFuncType.ND, + sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 1)), + error_inputs_func=error_inputs_fftn, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + dtypes=all_types_and_complex_and(torch.bool), + # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs + dtypesIfCUDA=all_types_and_complex_and( + torch.bool, + *(() if (not SM53OrLater) else (torch.half, torch.complex32)), + ), + decorators=[ + DecorateInfo( + precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}), + "TestFFT", + "test_reference_nd", + ) + ], + ), + SpectralFuncInfo( + "fft.ihfft", + aten_name="fft_ihfft", + decomp_aten_name="_fft_r2c", + ref=np.fft.ihfft, + ndimensional=SpectralFuncType.OneD, + sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 1)), + error_inputs_func=error_inputs_fft, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + dtypes=all_types_and(torch.bool), + # CUDA supports Half/ComplexHalf Precision FFT 
only on SM53 or later archs + dtypesIfCUDA=all_types_and( + torch.bool, *(() if (not SM53OrLater) else (torch.half,)) + ), + skips=(), + check_batched_grad=False, + ), + SpectralFuncInfo( + "fft.ihfft2", + aten_name="fft_ihfft2", + decomp_aten_name="_fft_r2c", + ref=scipy.fft.ihfftn if has_scipy_fft else None, + ndimensional=SpectralFuncType.TwoD, + sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 1)), + error_inputs_func=error_inputs_fftn, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + dtypes=all_types_and(torch.bool), + # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs + dtypesIfCUDA=all_types_and( + torch.bool, *(() if (not SM53OrLater) else (torch.half,)) + ), + check_batched_grad=False, + check_batched_gradgrad=False, + decorators=( + # The values for attribute 'shape' do not match: torch.Size([5, 6, 5]) != torch.Size([5, 6, 6]). + DecorateInfo(unittest.expectedFailure, "TestCommon", "test_out_warning"), + DecorateInfo( + precisionOverride({torch.float: 2e-4}), "TestFFT", "test_reference_nd" + ), + # Mismatched elements! 
+ DecorateInfo(unittest.expectedFailure, "TestCommon", "test_out"), + DecorateInfo(unittest.expectedFailure, "TestCommon", "test_out_warnings"), + ), + ), + SpectralFuncInfo( + "fft.ihfftn", + aten_name="fft_ihfftn", + decomp_aten_name="_fft_r2c", + ref=scipy.fft.ihfftn if has_scipy_fft else None, + ndimensional=SpectralFuncType.ND, + sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 1)), + error_inputs_func=error_inputs_fftn, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + dtypes=all_types_and(torch.bool), + # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archss + dtypesIfCUDA=all_types_and( + torch.bool, *(() if (not SM53OrLater) else (torch.half,)) + ), + check_batched_grad=False, + check_batched_gradgrad=False, + decorators=[ + # The values for attribute 'shape' do not match: torch.Size([5, 6, 5]) != torch.Size([5, 6, 6]). + DecorateInfo(unittest.expectedFailure, "TestCommon", "test_out_warning"), + # Mismatched elements! 
+ DecorateInfo(unittest.expectedFailure, "TestCommon", "test_out"), + DecorateInfo( + precisionOverride({torch.float: 2e-4}), "TestFFT", "test_reference_nd" + ), + ], + ), + SpectralFuncInfo( + "fft.irfft", + aten_name="fft_irfft", + decomp_aten_name="_fft_c2r", + ref=np.fft.irfft, + ndimensional=SpectralFuncType.OneD, + sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 2)), + error_inputs_func=error_inputs_fft, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + dtypes=all_types_and_complex_and(torch.bool), + # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs + dtypesIfCUDA=all_types_and_complex_and( + torch.bool, + *(() if (not SM53OrLater) else (torch.half, torch.complex32)), + ), + check_batched_gradgrad=False, + ), + SpectralFuncInfo( + "fft.irfft2", + aten_name="fft_irfft2", + decomp_aten_name="_fft_c2r", + ref=np.fft.irfft2, + ndimensional=SpectralFuncType.TwoD, + sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 2)), + error_inputs_func=error_inputs_fftn, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + dtypes=all_types_and_complex_and(torch.bool), + # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs + dtypesIfCUDA=all_types_and_complex_and( + torch.bool, + *(() if (not SM53OrLater) else (torch.half, torch.complex32)), + ), + check_batched_gradgrad=False, + decorators=[ + DecorateInfo( + precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}), + "TestFFT", + "test_reference_nd", + ) + ], + ), + SpectralFuncInfo( + "fft.irfftn", + aten_name="fft_irfftn", + decomp_aten_name="_fft_c2r", + ref=np.fft.irfftn, + 
ndimensional=SpectralFuncType.ND, + sample_inputs_func=partial(sample_inputs_fft_with_min, min_size=(1, 2)), + error_inputs_func=error_inputs_fftn, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + dtypes=all_types_and_complex_and(torch.bool), + # CUDA supports Half/ComplexHalf Precision FFT only on SM53 or later archs + dtypesIfCUDA=all_types_and_complex_and( + torch.bool, + *(() if (not SM53OrLater) else (torch.half, torch.complex32)), + ), + check_batched_gradgrad=False, + decorators=[ + DecorateInfo( + precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}), + "TestFFT", + "test_reference_nd", + ) + ], + ), + OpInfo( + "fft.fftshift", + dtypes=all_types_and_complex_and( + torch.bool, torch.bfloat16, torch.half, torch.chalf + ), + sample_inputs_func=sample_inputs_fftshift, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + ), + OpInfo( + "fft.ifftshift", + dtypes=all_types_and_complex_and( + torch.bool, torch.bfloat16, torch.half, torch.chalf + ), + sample_inputs_func=sample_inputs_fftshift, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + ), +] + +python_ref_db: List[OpInfo] = [ + SpectralFuncPythonRefInfo( + "_refs.fft.fft", + torch_opinfo_name="fft.fft", + ), + SpectralFuncPythonRefInfo( + "_refs.fft.ifft", + torch_opinfo_name="fft.ifft", + ), + SpectralFuncPythonRefInfo( + "_refs.fft.rfft", + torch_opinfo_name="fft.rfft", + ), + SpectralFuncPythonRefInfo( + "_refs.fft.irfft", + torch_opinfo_name="fft.irfft", + ), + SpectralFuncPythonRefInfo( + "_refs.fft.hfft", + torch_opinfo_name="fft.hfft", + ), + SpectralFuncPythonRefInfo( + "_refs.fft.ihfft", + torch_opinfo_name="fft.ihfft", + ), + SpectralFuncPythonRefInfo( + "_refs.fft.fftn", + torch_opinfo_name="fft.fftn", + decorators=[ + DecorateInfo( + 
precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}), + "TestFFT", + "test_reference_nd", + ) + ], + ), + SpectralFuncPythonRefInfo( + "_refs.fft.ifftn", + torch_opinfo_name="fft.ifftn", + decorators=[ + DecorateInfo( + precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}), + "TestFFT", + "test_reference_nd", + ) + ], + ), + SpectralFuncPythonRefInfo( + "_refs.fft.rfftn", + torch_opinfo_name="fft.rfftn", + ), + SpectralFuncPythonRefInfo( + "_refs.fft.irfftn", + torch_opinfo_name="fft.irfftn", + decorators=[ + DecorateInfo( + precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}), + "TestFFT", + "test_reference_nd", + ) + ], + ), + SpectralFuncPythonRefInfo( + "_refs.fft.hfftn", + torch_opinfo_name="fft.hfftn", + decorators=[ + DecorateInfo( + precisionOverride({torch.float: 2e-4, torch.cfloat: 2e-4}), + "TestFFT", + "test_reference_nd", + ) + ], + ), + SpectralFuncPythonRefInfo( + "_refs.fft.ihfftn", + torch_opinfo_name="fft.ihfftn", + decorators=[ + DecorateInfo( + precisionOverride({torch.float: 2e-4}), + "TestFFT", + "test_reference_nd", + ), + # AssertionError: Reference result was farther (0.09746177145360499) from the precise + # computation than the torch result was (0.09111555632069855) + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_python_ref_torch_fallback", + dtypes=(torch.float16,), + device_type="cuda", + ), + # AssertionError: Reference result was farther (0.0953431016138116) from the precise + # computation than the torch result was (0.09305490684430734) + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_python_ref_executor", + dtypes=(torch.float16,), + device_type="cuda", + ), + ], + ), + SpectralFuncPythonRefInfo( + "_refs.fft.fft2", + torch_opinfo_name="fft.fft2", + ), + SpectralFuncPythonRefInfo( + "_refs.fft.ifft2", + torch_opinfo_name="fft.ifft2", + decorators=[ + DecorateInfo( + precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}), + "TestFFT", + "test_reference_nd", + ) + ], + ), + 
SpectralFuncPythonRefInfo( + "_refs.fft.rfft2", + torch_opinfo_name="fft.rfft2", + ), + SpectralFuncPythonRefInfo( + "_refs.fft.irfft2", + torch_opinfo_name="fft.irfft2", + decorators=[ + DecorateInfo( + precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}), + "TestFFT", + "test_reference_nd", + ) + ], + ), + SpectralFuncPythonRefInfo( + "_refs.fft.hfft2", + torch_opinfo_name="fft.hfft2", + decorators=[ + DecorateInfo( + precisionOverride({torch.float: 2e-4, torch.cfloat: 2e-4}), + "TestFFT", + "test_reference_nd", + ) + ], + ), + SpectralFuncPythonRefInfo( + "_refs.fft.ihfft2", + torch_opinfo_name="fft.ihfft2", + decorators=[ + DecorateInfo( + precisionOverride({torch.float: 2e-4}), + "TestFFT", + "test_reference_nd", + ), + # FIXME: + # Reference result was farther (0.0953431016138116) from the precise computation + # than the torch result was (0.09305490684430734)! + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_python_ref_executor", + device_type="cuda", + ), + ], + ), + PythonRefInfo( + "_refs.fft.fftshift", + op_db=op_db, + torch_opinfo_name="fft.fftshift", + ), + PythonRefInfo( + "_refs.fft.ifftshift", + op_db=op_db, + torch_opinfo_name="fft.ifftshift", + ), +] diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/linalg.py b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/linalg.py new file mode 100644 index 0000000000000000000000000000000000000000..e94c6a67114431c224131124d89ab8868ca13e66 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/linalg.py @@ -0,0 +1,2481 @@ +# mypy: ignore-errors + +import itertools +import random +import unittest +from functools import partial +from itertools import chain, product +from typing import Iterable, List, Tuple + +import numpy as np +from numpy import inf + +import torch +from torch.testing import make_tensor +from torch.testing._internal.common_cuda import ( 
+ _get_magma_version, + _get_torch_cuda_version, + with_tf32_off, +) +from torch.testing._internal.common_device_type import ( + has_cusolver, + skipCPUIfNoLapack, + skipCUDAIf, + skipCUDAIfNoCusolver, + skipCUDAIfNoMagma, + skipCUDAIfNoMagmaAndNoCusolver, + skipCUDAIfNoMagmaAndNoLinalgsolver, + skipCUDAIfRocm, + tol, + toleranceOverride, +) +from torch.testing._internal.common_dtype import ( + all_types_and_complex, + all_types_and_complex_and, + floating_and_complex_types, + floating_and_complex_types_and, + get_all_complex_dtypes, +) +from torch.testing._internal.common_utils import ( + GRADCHECK_NONDET_TOL, + IS_MACOS, + make_fullrank_matrices_with_distinct_singular_values, + skipIfSlowGradcheckEnv, + slowTest, + TEST_WITH_ROCM, +) +from torch.testing._internal.opinfo.core import ( + clone_sample, + DecorateInfo, + ErrorInput, + gradcheck_wrapper_hermitian_input, + L, + M, + OpInfo, + ReductionOpInfo, + S, + SampleInput, +) +from torch.testing._internal.opinfo.refs import PythonRefInfo, ReductionPythonRefInfo + + +def sample_kwargs_vector_norm(t, **kwargs): + # orders with / without identity + def ords(): + has_id = (6, 4, 2, 1, 0, 0.9) + no_id = (inf, -2.1, -inf) + if t.numel() == 0: + dim = kwargs.get("dim") + if dim is None: + return has_id + if not isinstance(dim, Iterable): + dim = (dim,) + for d in dim: + if t.size(d) == 0: + return has_id + return has_id + no_id + + return (((), dict(ord=o)) for o in ords()) + + +def sample_inputs_svd(op_info, device, dtype, requires_grad=False, **kwargs): + make_fullrank = make_fullrank_matrices_with_distinct_singular_values + make_arg = partial( + make_fullrank, dtype=dtype, device=device, requires_grad=requires_grad + ) + + is_linalg_svd = "linalg.svd" in op_info.name + batches = [(), (0,), (3,)] + ns = [0, 3, 5] + + def uniformize(usv): + S = usv[1] + k = S.shape[-1] + U = usv[0][..., :k] + Vh = usv[2] if is_linalg_svd else usv[2].mH + Vh = Vh[..., :k, :] + return U, S, Vh + + def fn_U(usv): + U, _, _ = 
uniformize(usv) + return U.abs() + + def fn_S(usv): + return uniformize(usv)[1] + + def fn_Vh(usv): + # We also return S to test + _, S, Vh = uniformize(usv) + return S, Vh.abs() + + def fn_UVh(usv): + U, S, Vh = uniformize(usv) + return U @ Vh, S + + fns = (fn_U, fn_S, fn_Vh, fn_UVh) + + fullmat = "full_matrices" if is_linalg_svd else "some" + + for batch, n, k, fullmat_val, fn in product(batches, ns, ns, (True, False), fns): + shape = batch + (n, k) + yield SampleInput( + make_arg(*shape), kwargs={fullmat: fullmat_val}, output_process_fn_grad=fn + ) + + +def sample_inputs_cross(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial( + make_tensor, dtype=dtype, device=device, requires_grad=requires_grad + ) + yield SampleInput(make_arg((S, 3)), args=(make_arg((S, 3)),)) + yield SampleInput( + make_arg((S, 3, S)), args=(make_arg((S, 3, S)),), kwargs=dict(dim=1) + ) + yield SampleInput(make_arg((1, 3)), args=(make_arg((S, 3)),), kwargs=dict(dim=-1)) + + +def error_inputs_cross(op_info, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float32) + + sample = SampleInput(input=make_arg((S, 3)), args=(make_arg((S, 1)),)) + err = "inputs dimension -1 must have length 3" + yield ErrorInput(sample, error_regex=err, error_type=RuntimeError) + + sample = SampleInput(input=make_arg((5, S, 3)), args=(make_arg((S, 3)),)) + err = "inputs must have the same number of dimensions" + yield ErrorInput(sample, error_regex=err, error_type=RuntimeError) + + sample = SampleInput(input=make_arg((S, 2)), args=(make_arg((S, 2)),)) + err = "must have length 3" + yield ErrorInput(sample, error_regex=err, error_type=RuntimeError) + + sample = SampleInput( + input=make_arg((S, 2)), args=(make_arg((S, 2)),), kwargs=dict(dim=2) + ) + err = "Dimension out of range" + yield ErrorInput(sample, error_regex=err, error_type=IndexError) + + +def sample_inputs_householder_product(op_info, device, dtype, requires_grad, **kwargs): + """ + This function 
generates input for torch.linalg.householder_product (torch.orgqr). + The first argument should be a square matrix or batch of square matrices, the second argument is a vector or batch of vectors. + Empty, square, rectangular, batched square and batched rectangular input is generated. + """ + make_arg = partial( + make_tensor, + device=device, + dtype=dtype, + requires_grad=requires_grad, + low=-2, + high=2, + ) + # Each column of the matrix is getting multiplied many times leading to very large values for + # the Jacobian matrix entries and making the finite-difference result of grad check less accurate. + # That's why gradcheck with the default range [-9, 9] fails and [-2, 2] is used here. + yield SampleInput(make_arg((S, S)), make_arg((S,))) + yield SampleInput(make_arg((S + 1, S)), make_arg((S,))) + yield SampleInput(make_arg((2, 1, S, S)), make_arg((2, 1, S))) + yield SampleInput(make_arg((2, 1, S + 1, S)), make_arg((2, 1, S))) + yield SampleInput( + make_arg((0, 0), low=None, high=None), + make_arg((0,), low=None, high=None), + ) + yield SampleInput(make_arg((S, S)), make_arg((0,), low=None, high=None)) + # m = n = S, k = S - 2 + yield SampleInput(make_arg((S, S)), make_arg((S - 2,), low=None, high=None)) + # m = S, n = S -1, k = S - 2 + yield SampleInput(make_arg((S, S - 1)), make_arg((S - 2,), low=None, high=None)) + + +def sample_inputs_linalg_det_singular(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype) + + def make_singular_matrix_batch_base(size, rank): + assert size[-1] == size[-2] + assert rank > 0 and rank < size[-1] + + n = size[-1] + a = make_arg(size[:-2] + (n, rank)) / 10 + b = make_arg(size[:-2] + (rank, n)) / 10 + x = a @ b + lu, pivs, _ = torch.linalg.lu_factor_ex(x) + p, l, u = torch.lu_unpack(lu, pivs) + u_diag_abs = u.diagonal(0, -2, -1).abs() + u_diag_abs_largest = u_diag_abs.max(dim=-1, keepdim=True).values + u_diag_abs_smallest_idxs = torch.topk( + u_diag_abs, k=(n - rank), 
largest=False + ).indices + u.diagonal(0, -2, -1).div_(u_diag_abs_largest) + u.diagonal(0, -2, -1)[..., u_diag_abs_smallest_idxs] = torch.finfo(dtype).eps + matrix = p @ l @ u + + matrix.requires_grad_(requires_grad) + return matrix + + for batch, size in product(((), (2,), (2, 2)), range(6)): + shape = batch + (size, size) + for rank in range(1, size): + yield SampleInput(make_singular_matrix_batch_base(shape, rank)) + + +def sample_inputs_linalg_matrix_power(op_info, device, dtype, requires_grad, **kwargs): + make_fullrank = make_fullrank_matrices_with_distinct_singular_values + make_arg = partial( + make_tensor, dtype=dtype, device=device, requires_grad=requires_grad + ) + make_arg_fullrank = partial( + make_fullrank, dtype=dtype, device=device, requires_grad=requires_grad + ) + # (, ()) + test_sizes = [ + (1, ()), + (2, (0,)), + (2, (2,)), + ] + + for matrix_size, batch_sizes in test_sizes: + size = batch_sizes + (matrix_size, matrix_size) + for n in (0, 3, 5): + yield SampleInput(make_arg(size), args=(n,)) + for n in [-4, -2, -1]: + yield SampleInput(make_arg_fullrank(*size), args=(n,)) + + +def sample_inputs_linalg_det_logdet_slogdet( + op_info, device, dtype, requires_grad, **kwargs +): + make_fullrank = make_fullrank_matrices_with_distinct_singular_values + make_arg = partial( + make_fullrank, dtype=dtype, device=device, requires_grad=requires_grad + ) + batches = [(), (0,), (3,)] + ns = [0, 1, 5] + + is_logdet = op_info.name == "logdet" + + for ( + batch, + n, + ) in product(batches, ns): + shape = batch + (n, n) + A = make_arg(*shape) + # Need to make the matrices in A have positive determinant for autograd + # To do so, we multiply A by its determinant to flip the sign of its determinant + if is_logdet and not A.is_complex() and A.numel() > 0: + s = torch.linalg.slogdet(A).sign + A = A * s.unsqueeze(-1).unsqueeze(-1) + A.requires_grad_(requires_grad) + yield SampleInput(A) + + +def sample_inputs_lu_solve(op_info, device, dtype, requires_grad=False, 
**kwargs): + """Samples the inputs for both linalg.lu_solve and lu_solve""" + make_fn = make_fullrank_matrices_with_distinct_singular_values + make_a = partial(make_fn, dtype=dtype, device=device) + make_b = partial(make_tensor, dtype=dtype, device=device) + + def clone(X, requires_grad): + Y = X.clone() + Y.requires_grad_(requires_grad) + return Y + + is_linalg_lu_solve = op_info.name == "linalg.lu_solve" + + batches = ((), (0,), (2,)) + ns = (3, 1, 0) + nrhs = (4, 1, 0) + + for n, batch, rhs in product(ns, batches, nrhs): + A = make_a(*(batch + (n, n))) + LU, pivots = torch.linalg.lu_factor(A) + + B = make_b(batch + (n, rhs)) + + grads = (False,) if not requires_grad else (True, False) + # we try all possible combinations of requires_grad for each input + for LU_grad, B_grad in product(grads, grads): + # when requires_grad == True, at least one input has to have requires_grad enabled + if requires_grad and not LU_grad and not B_grad: + continue + + if is_linalg_lu_solve: + for adjoint, left in product((True, False), repeat=2): + yield SampleInput( + clone(LU, LU_grad), + args=(pivots, clone(B if left else B.mT, B_grad)), + kwargs=dict(adjoint=adjoint, left=left), + ) + else: + yield SampleInput(clone(B, B_grad), args=(clone(LU, LU_grad), pivots)) + + +def sample_inputs_linalg_multi_dot(op_info, device, dtype, requires_grad, **kwargs): + # Each test case consists of the sizes in the chain of multiplications + # e.g. 
[2, 3, 4, 5] generates matrices (2, 3) @ (3, 4) @ (4, 5) + test_cases = [ + [1, 2, 1], + [2, 0, 2], + [0, 2, 2], + [2, 2, 2, 2], + [2, 3, 4, 5], + [5, 4, 0, 2], + [2, 4, 3, 5, 3, 2], + ] + + for sizes in test_cases: + tensors = [] + for size in zip(sizes[:-1], sizes[1:]): + t = make_tensor( + size, dtype=dtype, device=device, requires_grad=requires_grad + ) + tensors.append(t) + yield SampleInput(tensors) + + +def sample_inputs_linalg_matrix_norm(op_info, device, dtype, requires_grad, **kwargs): + low_precision_dtypes = (torch.float16, torch.bfloat16, torch.complex32) + make_arg = partial( + make_tensor, device=device, dtype=dtype, requires_grad=requires_grad + ) + + sizes = ((2, 2), (2, 3, 2)) + if dtype in low_precision_dtypes: + # svdvals not supported for low precision dtypes + ords = ("fro", inf, -inf, 1, -1) + else: + ords = ("fro", "nuc", inf, -inf, 1, -1, 2, -2) + dims = ((-2, -1), (-1, 0)) + + for size, ord, dim, keepdim in product(sizes, ords, dims, [True, False]): + yield SampleInput(make_arg(size), args=(ord, dim, keepdim)) + + +def sample_inputs_linalg_norm( + op_info, device, dtype, requires_grad, *, variant=None, **kwargs +): + if variant is not None and variant not in ("subgradient_at_zero",): + raise ValueError( + f"Unsupported variant, expected variant to be 'subgradient_at_zero' but got: {variant}" + ) + + test_sizes = [ + (S,), + (0,), + (S, S), + (0, 0), + (S, 0), + (0, S), + (S, S, S), + (0, S, S), + (S, 0, S), + (0, 0, 0), + ] + + vector_ords = (None, 0, 0.5, 1, 2, 3.5, inf, -0.5, -1, -2, -3.5, -inf) + if dtype in {torch.float16, torch.bfloat16, torch.complex32}: + # svdvals not supported for low precision dtypes + matrix_ords = ("fro", inf, -inf, 1, -1) + else: + matrix_ords = (None, "fro", "nuc", inf, -inf, 1, -1, 2, -2) + + make_arg = partial( + make_tensor, + dtype=dtype, + device=device, + requires_grad=requires_grad, + low=None, + high=None, + ) + + for test_size in test_sizes: + is_vector_norm = len(test_size) == 1 + is_matrix_norm = 
len(test_size) == 2 + + # IndexError: amax(): Expected reduction dim 0 to have non-zero size. + is_valid_for_p2 = is_vector_norm or (test_size[-1] != 0 and test_size[-2] != 0) + + for keepdim in [False, True]: + if variant != "subgradient_at_zero" and is_valid_for_p2: + yield SampleInput(make_arg(test_size), keepdim=keepdim) + + if not (is_vector_norm or is_matrix_norm): + continue + + ords = vector_ords if is_vector_norm else matrix_ords + + for ord in ords: + if is_vector_norm and test_size[-1] == 0: + if ord == np.inf or (ord is not None and ord < 0): + # RuntimeError: linalg.vector_norm cannot compute the + # {ord} norm on an empty tensor because the operation + # does not have an identity + continue + elif is_matrix_norm: + dims_to_check = { + None: (0,), + np.inf: (0,), + 2: (0, 1), + 1: (1,), + -1: (1,), + -2: (0, 1), + -np.inf: (0,), + }.get(ord, ()) + + if any(test_size[d] == 0 for d in dims_to_check): + # IndexError: amax(): Expected reduction dim {dim} to + # have non-zero size. 
+ continue + + if variant == "subgradient_at_zero": + yield SampleInput( + torch.zeros( + test_size, + dtype=dtype, + device=device, + requires_grad=requires_grad, + ), + ord, + keepdim=keepdim, + ) + else: + yield SampleInput(make_arg(test_size), ord, keepdim=keepdim) + + if ord in ["nuc", "fro"]: + yield SampleInput( + make_arg(test_size), ord=ord, keepdim=keepdim, dim=(0, 1) + ) + + +def sample_inputs_linalg_vecdot(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial( + make_tensor, device=device, dtype=dtype, requires_grad=requires_grad + ) + batches = ((), (0,), (1,), (5,)) + ns = (0, 1, 3, 5) + for b, n in product(batches, ns): + shape = b + (n,) + yield SampleInput(make_arg(shape), args=(make_arg(shape),)) + for i in range(len(shape)): + yield SampleInput( + make_arg(shape), args=(make_arg(shape),), kwargs=dict(dim=i) + ) + + +def sample_inputs_linalg_invertible( + op_info, device, dtype, requires_grad=False, **kwargs +): + """ + This function generates invertible inputs for linear algebra ops + The input is generated as the itertools.product of 'batches' and 'ns'. + In total this function generates 8 SampleInputs + 'batches' cases include: + () - single input, + (0,) - zero batched dimension, + (2,) - batch of two matrices, + (1, 1) - 1x1 batch of matrices + 'ns' gives 0x0 and 5x5 matrices. + Zeros in dimensions are edge cases in the implementation and important to test for in order to avoid unexpected crashes. 
+ """ + make_fn = make_fullrank_matrices_with_distinct_singular_values + make_arg = partial(make_fn, dtype=dtype, device=device, requires_grad=requires_grad) + + batches = [(), (0,), (2,), (1, 1)] + ns = [5, 0] + + for batch, n in product(batches, ns): + yield SampleInput(make_arg(*batch, n, n)) + + +def sample_inputs_matrix_rank(op_info, device, dtype, requires_grad=False, **kwargs): + """ + This function produces inputs for matrix rank that test + all possible combinations for atol and rtol + """ + + def make_tol_arg(kwarg_type, inp): + if kwarg_type == "none": + return None + if kwarg_type == "float": + return 1.0 + assert kwarg_type == "tensor" + return torch.ones(inp.shape[:-2], device=device) + + for tol_type in ["float", "tensor"]: + for atol_type, rtol_type in product(["none", tol_type], repeat=2): + if ( + not atol_type and not rtol_type + ): # default behavior, so skipped here so it's not tested 2 extra times + continue + for sample in sample_inputs_linalg_invertible( + op_info, device, dtype, requires_grad + ): + assert sample.kwargs == {} + sample.kwargs = { + "atol": make_tol_arg(atol_type, sample.input), + "rtol": make_tol_arg(rtol_type, sample.input), + } + yield sample + + # default kwargs + yield from sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad) + + +def sample_inputs_linalg_pinv_singular( + op_info, device, dtype, requires_grad=False, **kwargs +): + """ + This function produces factors `a` and `b` to generate inputs of the form `a @ b.t()` to + test the backward method of `linalg_pinv`. That way we always preserve the rank of the + input no matter the perturbations applied to it by the gradcheck. + Note that `pinv` is Frechet-differentiable in a rank-preserving neighborhood. + """ + batches = [(), (0,), (2,), (1, 1)] + # the size of at least 30 is required to cause failures for the previous implicit implementation + # of the pinv's backward method, albeit it is slow. 
+ size = [0, 3, 50] + + for batch, m, n in product(batches, size, size): + for k in range(min(3, m, n)): + # Note that by making the columns of `a` and `b` orthonormal we make sure that + # the product matrix `a @ b.t()` has condition number 1 when restricted to its image + a = ( + torch.rand(*batch, m, k, device=device, dtype=dtype) + .qr() + .Q.requires_grad_(requires_grad) + ) + b = ( + torch.rand(*batch, n, k, device=device, dtype=dtype) + .qr() + .Q.requires_grad_(requires_grad) + ) + yield SampleInput(a, args=(b,)) + + +def sample_inputs_linalg_cond(op_info, device, dtype, requires_grad=False, **kwargs): + make_arg = partial( + make_tensor, dtype=dtype, device=device, requires_grad=requires_grad + ) + + # autograd is not supported for inputs with zero number of elements + shapes = ( + (S, S), + (2, S, S), + (2, 1, S, S), + ) + + for shape in shapes: + yield SampleInput(make_arg(shape)) + + +def sample_inputs_linalg_vander(op_info, device, dtype, requires_grad=False, **kwargs): + make_arg = partial( + make_tensor, dtype=dtype, device=device, requires_grad=requires_grad + ) + + shapes = ( + (), + (1,), + (S,), + (2, S), + ) + + for shape in shapes: + if len(shape) > 0 and shape[-1] > 1: + yield SampleInput(make_arg(shape)) + n = shape[-1] if len(shape) > 0 else 1 + for i in range(3): + # n-1, n, n+1 + N = n + i - 1 + if N < 2: + continue + yield SampleInput(make_arg(shape), kwargs=dict(N=N)) + + +def np_vander_batched(x, N=None): + # Wrapper around np.vander that supports batches of 1 dimension (enough for the tests) + if x.ndim == 0: + x = x[np.newaxis] + if x.ndim == 1: + y = np.vander(x, N=N, increasing=True) + return y + else: + if N is None: + N = x.shape[-1] + y = np.vander(x.ravel(), N=N, increasing=True).reshape((*x.shape, N)) + return y + + +def sample_inputs_linalg_cholesky_inverse( + op_info, device, dtype, requires_grad=False, **kwargs +): + from torch.testing._internal.common_utils import random_well_conditioned_matrix + + # Cholesky factorization 
is for positive-definite matrices + single_well_conditioned_matrix = random_well_conditioned_matrix( + S, S, dtype=dtype, device=device + ) + batch_well_conditioned_matrices = random_well_conditioned_matrix( + 2, S, S, dtype=dtype, device=device + ) + single_pd = single_well_conditioned_matrix @ single_well_conditioned_matrix.mH + batch_pd = batch_well_conditioned_matrices @ batch_well_conditioned_matrices.mH + + inputs = ( + torch.zeros(0, 0, dtype=dtype, device=device), # 0x0 matrix + torch.zeros(0, 2, 2, dtype=dtype, device=device), # zero batch of matrices + single_pd, + batch_pd, + ) + test_cases = (torch.linalg.cholesky(a, upper=False) for a in inputs) + for l in test_cases: + # generated lower-triangular samples + l.requires_grad = requires_grad + yield SampleInput(l) # upper=False by default + yield SampleInput( + l.detach().clone().requires_grad_(requires_grad), kwargs=dict(upper=False) + ) + + # generate upper-triangular inputs + u = l.detach().clone().mT.contiguous().requires_grad_(requires_grad) + yield SampleInput(u, kwargs=dict(upper=True)) + + +def sample_inputs_linalg_ldl_factor( + op_info, device, dtype, requires_grad=False, **kwargs +): + from torch.testing._internal.common_utils import ( + random_hermitian_pd_matrix, + random_symmetric_pd_matrix, + ) + + device = torch.device(device) + + # Symmetric inputs + yield SampleInput( + random_symmetric_pd_matrix(S, dtype=dtype, device=device), + kwargs=dict(hermitian=False), + ) # single matrix + yield SampleInput( + random_symmetric_pd_matrix(S, 2, dtype=dtype, device=device), + kwargs=dict(hermitian=False), + ) # batch of matrices + yield SampleInput( + torch.zeros(0, 0, dtype=dtype, device=device), kwargs=dict(hermitian=False) + ) # 0x0 matrix + yield SampleInput( + torch.zeros(0, 2, 2, dtype=dtype, device=device), kwargs=dict(hermitian=False) + ) # zero batch of matrices + + # Hermitian inputs + # hermitian=True for complex inputs on CUDA is supported only with MAGMA 2.5.4+ + magma_254_available = 
device.type == "cuda" and _get_magma_version() >= (2, 5, 4) + if dtype.is_complex and (device.type == "cpu" or magma_254_available): + yield SampleInput( + random_hermitian_pd_matrix(S, dtype=dtype, device=device), + kwargs=dict(hermitian=True), + ) # single matrix + yield SampleInput( + random_hermitian_pd_matrix(S, 2, dtype=dtype, device=device), + kwargs=dict(hermitian=True), + ) # batch of matrices + + +def sample_inputs_linalg_ldl_solve( + op_info, device, dtype, requires_grad=False, **kwargs +): + # Generate LDL factors of symmetric (and Hermitian on CPU) matrices + from torch.testing._internal.common_utils import ( + random_hermitian_pd_matrix, + random_symmetric_pd_matrix, + ) + + device = torch.device(device) + symmetric_inputs = ( + random_symmetric_pd_matrix(S, dtype=dtype, device=device), # single matrix + random_symmetric_pd_matrix( + S, 2, dtype=dtype, device=device + ), # batch of matrices + torch.zeros(0, 0, dtype=dtype, device=device), # 0x0 matrix + torch.zeros(0, 2, 2, dtype=dtype, device=device), # zero batch of matrices + ) + hermitian_inputs = ( + ( + random_hermitian_pd_matrix(S, dtype=dtype, device=device), + random_hermitian_pd_matrix(S, 2, dtype=dtype, device=device), + ) + if device.type == "cpu" and dtype.is_complex + else () + ) + test_cases1 = ( + torch.linalg.ldl_factor_ex(a, hermitian=False) for a in symmetric_inputs + ) + test_cases2 = ( + torch.linalg.ldl_factor_ex(a, hermitian=True) for a in hermitian_inputs + ) + + # Symmetric case + make_arg = partial( + make_tensor, device=device, dtype=dtype, requires_grad=requires_grad + ) + for test_case in test_cases1: + factors, pivots, _ = test_case + factors.requires_grad = requires_grad + for B_batch_shape in ((), factors.shape[:-2]): + B = make_arg((*B_batch_shape, factors.shape[-1], S)) + yield SampleInput(factors, args=(pivots, B), kwargs=dict(hermitian=False)) + clone_factors = factors.detach().clone().requires_grad_(requires_grad) + yield SampleInput( + clone_factors, args=(pivots, 
B), kwargs=dict(hermitian=False) + ) + + # Hermitian case + for test_case in test_cases2: + factors, pivots, _ = test_case + factors.requires_grad = requires_grad + for B_batch_shape in ((), factors.shape[:-2]): + B = make_arg((*B_batch_shape, factors.shape[-1], S)) + yield SampleInput(factors, args=(pivots, B), kwargs=dict(hermitian=True)) + clone_factors = factors.detach().clone().requires_grad_(requires_grad) + yield SampleInput( + clone_factors, args=(pivots, B), kwargs=dict(hermitian=True) + ) + + +def sample_inputs_linalg_lstsq(op_info, device, dtype, requires_grad=False, **kwargs): + from torch.testing._internal.common_utils import random_well_conditioned_matrix + + device = torch.device(device) + + drivers: Tuple[str, ...] + if device.type == "cuda": + drivers = ("gels",) + else: + drivers = ("gels", "gelsy", "gelss", "gelsd") + + # we generate matrices of shape (..., n + delta, n) + deltas: Tuple[int, ...] + if device.type == "cpu" or has_cusolver(): + deltas = (-1, 0, +1) + # only square systems if Cusolver is not available + # becase we solve a lstsq problem with a transposed matrix in the backward + else: + deltas = (0,) + + for batch, driver, delta in product(((), (3,), (3, 3)), drivers, deltas): + shape = batch + (3 + delta, 3) + a = random_well_conditioned_matrix(*shape, dtype=dtype, device=device) + a.requires_grad_(requires_grad) + b = make_tensor( + shape, + dtype=dtype, + device=device, + low=None, + high=None, + requires_grad=requires_grad, + ) + yield SampleInput(a, b, driver=driver) + + +def error_inputs_lstsq(op_info, device, **kwargs): + zero_d = torch.randn((), device=device) + yield ErrorInput( + SampleInput(zero_d, args=(zero_d,)), + error_type=RuntimeError, + error_regex="at least 2 dimensions", + ) + + +def error_inputs_lstsq_grad_oriented(op_info, device, **kwargs): + zero_d = torch.randn((), device=device) + yield ErrorInput( + SampleInput(zero_d, args=(zero_d, None)), + error_type=RuntimeError, + error_regex="at least 2 dimensions", 
+ ) + + +def sample_inputs_diagonal_diag_embed(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial( + make_tensor, dtype=dtype, device=device, requires_grad=requires_grad + ) + + # Shapes for 2D Tensors + shapes_2d = ((S, S), (3, 5), (5, 3)) + + # Shapes for 3D Tensors + shapes_3d = ((S, S, S),) + + kwargs_2d = ({}, dict(offset=2), dict(offset=2), dict(offset=1)) + kwargs_3d = ( + dict(offset=1, dim1=1, dim2=2), + dict(offset=2, dim1=0, dim2=1), + dict(offset=-2, dim1=0, dim2=1), + ) + + for shape, kwarg in chain( + product(shapes_2d, kwargs_2d), product(shapes_3d, kwargs_3d) + ): + yield SampleInput(make_arg(shape), kwargs=kwarg) + + +def error_inputs_diagonal_diag_embed(op_info, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float32) + + shapes1d = (0, 1, (0,), (1,)) + shapes2d = ((M, L),) + shapes3d = ((M, S, L),) + + kwargs1d = {} + + kwargs2d = ( + # dim1 == dim2 is not allowed + dict(dim1=1, dim2=1), + # out of bounds dims are not allowed + dict(dim1=10000), + dict(dim2=10000), + ) + + kwargs3d = kwargs2d + + samples1d = product(shapes1d, kwargs1d) + samples2d = product(shapes2d, kwargs2d) + samples3d = product(shapes3d, kwargs3d) + + for shape, kwargs in chain(samples1d, samples2d, samples3d): + arg = make_arg(shape) + sample = SampleInput(input=arg, kwargs=kwargs) + + dim1 = kwargs.get("dim1") + dim2 = kwargs.get("dim2") + + if "diagonal" in op_info.name: + num_dim = arg.dim() + elif op_info.name in ("diag_embed", "_refs.diag_embed"): + # these are valid inputs for diag_embed + if shape in ((0,), (1,)): + continue + num_dim = arg.dim() + 1 + else: + raise RuntimeError("should be unreachable") + + bound1 = -num_dim + bound2 = num_dim - 1 + dim_range = range(bound1, bound2 + 1) + dim1_cond = dim1 and dim1 not in dim_range + dim2_cond = dim2 and dim2 not in dim_range + + if dim1 == dim2: + err = f"diagonal dimensions cannot be identical {dim1}, {dim2}" + yield ErrorInput(sample, error_regex=err, 
error_type=RuntimeError) + elif dim1_cond or dim2_cond: + err_dim = dim1 if dim1_cond else dim2 + err = ( + r"Dimension out of range \(expected to be in range of " + rf"\[{bound1}, {bound2}\], but got {err_dim}\)" + ) + yield ErrorInput(sample, error_regex=err, error_type=IndexError) + else: + raise RuntimeError("should be unreachable") + + +def sample_inputs_linalg_cholesky( + op_info, device, dtype, requires_grad=False, **kwargs +): + """ + This function generates always positive-definite input for torch.linalg.cholesky using + random_hermitian_pd_matrix. + The input is generated as the itertools.product of 'batches' and 'ns'. + In total this function generates 8 SampleInputs + 'batches' cases include: + () - single input, + (0,) - zero batched dimension, + (2,) - batch of two matrices, + (1, 1) - 1x1 batch of matrices + 'ns' gives 0x0 and 5x5 matrices. + Zeros in dimensions are edge cases in the implementation and important to test for in order to avoid unexpected crashes. + """ + from torch.testing._internal.common_utils import random_hermitian_pd_matrix + + batches = [(), (0,), (2,), (1, 1)] + ns = [5, 0] + for batch, n, upper in product(batches, ns, [True, False]): + a = random_hermitian_pd_matrix(n, *batch, dtype=dtype, device=device) + a.requires_grad = requires_grad + yield SampleInput(a, upper=upper) + + +def sample_inputs_linalg_eig(op_info, device, dtype, requires_grad=False, **kwargs): + """ + This function generates input for torch.linalg.eig + """ + + def out_fn(output): + return output[0], abs(output[1]) + + samples = sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad) + for sample in samples: + sample.output_process_fn_grad = out_fn + yield sample + + +def sample_inputs_linalg_eigh(op_info, device, dtype, requires_grad=False, **kwargs): + """ + This function generates input for torch.linalg.eigh/eigvalsh with UPLO="U" or "L" keyword argument. 
+ """ + + def out_fn(output): + if isinstance(output, tuple): + # eigh function + return output[0], abs(output[1]) + else: + # eigvalsh function + return output + + # Samples do not need to be Hermitian, as we're using gradcheck_wrapper_hermitian_input + samples = sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad) + for sample in samples: + # Note: we cannot use np.random.choice here as TorchDynamo + # does not support tensors of strings. + sample.kwargs = {"UPLO": random.choice(["L", "U"])} + sample.output_process_fn_grad = out_fn + yield sample + + +def sample_inputs_linalg_pinv(op_info, device, dtype, requires_grad=False, **kwargs): + """ + This function generates input for torch.linalg.pinv with hermitian=False keyword argument. + """ + for o in sample_inputs_linalg_invertible( + op_info, device, dtype, requires_grad, **kwargs + ): + real_dtype = o.input.real.dtype if dtype.is_complex else dtype + # requires_grad path for rtol tensor is not implemented + for rtol in (None, 1.0, torch.tensor(1.0, dtype=real_dtype, device=device)): + o = clone_sample(o) + o.kwargs = {"rtol": rtol} + yield o + + +def sample_inputs_linalg_pinv_hermitian( + op_info, device, dtype, requires_grad=False, **kwargs +): + """ + This function generates input for torch.linalg.pinv with hermitian=True keyword argument. + """ + for o in sample_inputs_linalg_invertible( + op_info, device, dtype, requires_grad, **kwargs + ): + o.kwargs = {"hermitian": True} + yield o + + +def sample_inputs_linalg_solve( + op_info, device, dtype, requires_grad=False, vector_rhs_allowed=True, **kwargs +): + """ + This function generates always solvable input for torch.linalg.solve + We sample a fullrank square matrix (i.e. invertible) A + The first input to torch.linalg.solve is generated as the itertools.product of 'batches' and 'ns'. + The second input is generated as the product of 'batches', 'ns' and 'nrhs'. 
+ In total this function generates 18 SampleInputs + 'batches' cases include: + () - single input, + (0,) - zero batched dimension, + (2,) - batch of two matrices. + 'ns' gives 0x0 and 5x5 matrices. + and 'nrhs' controls the number of vectors to solve for: + () - using 1 as the number of vectors implicitly + (1,) - same as () but explicit + (3,) - solve for 3 vectors. + Zeros in dimensions are edge cases in the implementation and important to test for in order to avoid unexpected crashes. + 'vector_rhs_allowed' controls whether to include nrhs = () to the list of SampleInputs. + torch.solve / triangular_solve / cholesky_solve (opposed to torch.linalg.solve) do not allow + 1D tensors (vectors) as the right-hand-side. + Once torch.solve / triangular_solve / cholesky_solve and its testing are removed, + 'vector_rhs_allowed' may be removed here as well. + """ + make_fullrank = make_fullrank_matrices_with_distinct_singular_values + make_a = partial( + make_fullrank, dtype=dtype, device=device, requires_grad=requires_grad + ) + make_b = partial( + make_tensor, dtype=dtype, device=device, requires_grad=requires_grad + ) + + batches = [(), (0,), (2,)] + ns = [5, 0] + if vector_rhs_allowed: + nrhs = [(), (1,), (3,)] + else: + nrhs = [(1,), (3,)] + + for n, batch, rhs in product(ns, batches, nrhs): + yield SampleInput(make_a(*batch, n, n), args=(make_b(batch + (n,) + rhs),)) + + +def sample_inputs_linalg_solve_triangular( + op_info, device, dtype, requires_grad=False, **kwargs +): + make_arg = partial(make_tensor, dtype=dtype, device=device) + bs = (1, 2, 0) + ns = (3, 0) + ks = (1, 3, 0) + + for b, n, k, (left, upper, uni) in product( + bs, ns, ks, product((True, False), repeat=3) + ): + if b == 1: + A = make_arg((n, n)) if left else make_arg((k, k)) + B = make_arg((n, k)) + else: + A = make_arg((b, n, n)) if left else make_arg((b, k, k)) + B = make_arg((b, n, k)) + if uni: + # Not really necessary, but writing it for consistency + A.diagonal(0, -2, -1).fill_(1.0) + else: + 
d = A.diagonal(0, -2, -1) + d[d.abs() < 1e-6] = 1.0 + if upper: + A.triu_() + else: + A.tril_() + kwargs = {"upper": upper, "left": left, "unitriangular": uni} + if requires_grad: + for grad_A, grad_B in product((True, False), repeat=2): + # Either A or B needs to have a gradient + if not grad_A and not grad_B: + continue + yield SampleInput( + A.clone().requires_grad_(grad_A), + args=(B.clone().requires_grad_(grad_B),), + kwargs=kwargs, + ) + else: + yield SampleInput(A, args=(B,), kwargs=kwargs) + + +def sample_inputs_legacy_solve(op_info, device, dtype, requires_grad=False, **kwargs): + """ + This function generates always solvable input for legacy solve functions + (the ones that are not in torch.linalg module). + The difference from sample_inputs_linalg_solve is that here the right-hand-side of A x = b equation + should have b.ndim >= 2, vectors are not allowed. + Also the arguments order is swapped. + """ + out = sample_inputs_linalg_solve( + op_info, device, dtype, requires_grad=requires_grad, vector_rhs_allowed=False + ) + + def out_fn(output): + return output[0] + + # Reverses tensor order + for sample in out: + sample.input, sample.args = sample.args[0], (sample.input,) + if op_info.name == "solve": + sample.output_process_fn_grad = out_fn + yield sample + + +def sample_inputs_linalg_lu(op_info, device, dtype, requires_grad=False, **kwargs): + full_rank = op_info.name == "linalg.lu_factor" + make_fn = ( + make_tensor + if not full_rank + else make_fullrank_matrices_with_distinct_singular_values + ) + make_arg = partial(make_fn, dtype=dtype, device=device, requires_grad=requires_grad) + + def out_fn(output): + if op_info.name == "linalg.lu": + return output[1], output[2] + else: + return output + + batch_shapes = ((), (3,), (3, 3)) + # pivot=False only supported in CUDA + pivots = (True, False) if torch.device(device).type == "cuda" else (True,) + deltas = (-2, -1, 0, +1, +2) + for batch_shape, pivot, delta in product(batch_shapes, pivots, deltas): + shape 
= batch_shape + (S + delta, S) + # Insanely annoying that make_fullrank_blablabla accepts a *shape and not a tuple! + A = make_arg(shape) if not full_rank else make_arg(*shape) + yield SampleInput(A, kwargs={"pivot": pivot}, output_process_fn_grad=out_fn) + + +def sample_inputs_linalg_svdvals(op_info, device, dtype, requires_grad=False, **kwargs): + make_arg = partial( + make_tensor, dtype=dtype, device=device, requires_grad=requires_grad + ) + + batches = [(), (0,), (2,), (1, 1)] + ns = [5, 2, 0] + + for batch, m, n in product(batches, ns, ns): + yield SampleInput(make_arg(batch + (m, n))) + + +def sample_inputs_linalg_qr_geqrf( + op_info, device, dtype, requires_grad=False, **kwargs +): + # QR is just well defined when the matrix is full rank + make_fullrank = make_fullrank_matrices_with_distinct_singular_values + make_arg = partial( + make_fullrank, dtype=dtype, device=device, requires_grad=requires_grad + ) + + batches = [(), (0,), (2,), (1, 1)] + ns = [5, 2, 0] + + for batch, (m, n) in product(batches, product(ns, ns)): + shape = batch + (m, n) + yield SampleInput(make_arg(*shape)) + + +def sample_inputs_tensorsolve(op_info, device, dtype, requires_grad, **kwargs): + a_shapes = [(2, 3, 6), (3, 4, 4, 3)] + # Zero-dim tensors are not supported in NumPy, so we skip them for now. + # NumPy is used in reference check tests. + # See https://github.com/numpy/numpy/pull/20482 for tracking NumPy bugfix. 
+ # a_shapes += [(0, 0, 1, 2, 3, 0)] + dimss = [None, (0, 2)] + + make_arg = partial( + make_tensor, dtype=dtype, device=device, requires_grad=requires_grad + ) + for a_shape, dims in itertools.product(a_shapes, dimss): + a = make_arg(a_shape) + b = make_arg(a_shape[:2]) + yield SampleInput(a, b, dims=dims) + + +def sample_inputs_tensorinv(op_info, device, dtype, requires_grad, **kwargs): + make_arg = make_fullrank_matrices_with_distinct_singular_values + + def make_input(): + return make_arg(12, 12, device=device, dtype=dtype, requires_grad=requires_grad) + + # lhs / rhs shape can have any number of dimensions as long as their product equals 12 + shapes = [ + ((2, 2, 3), (12, 1)), + ((4, 3), (6, 1, 2)), + ] + + for shape_lhs, shape_rhs in shapes: + inp = make_input().reshape(*shape_lhs, *shape_rhs).detach() + inp.requires_grad_(requires_grad) + yield SampleInput(inp, ind=len(shape_lhs)) + + +op_db: List[OpInfo] = [ + OpInfo( + "linalg.cross", + ref=lambda x, y, dim=-1: np.cross(x, y, axis=dim), + op=torch.linalg.cross, + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), + aten_name="linalg_cross", + sample_inputs_func=sample_inputs_cross, + error_inputs_func=error_inputs_cross, + supports_out=True, + supports_fwgrad_bwgrad=True, + supports_forward_ad=True, + skips=( + DecorateInfo( + unittest.skip("Unsupported on MPS for now"), + "TestCommon", + "test_numpy_ref_mps", + ), + ), + ), + OpInfo( + "linalg.det", + aten_name="linalg_det", + op=torch.linalg.det, + aliases=("det",), + dtypes=floating_and_complex_types(), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_linalg_det_logdet_slogdet, + decorators=[skipCPUIfNoLapack, skipCUDAIfNoMagmaAndNoCusolver], + check_batched_gradgrad=False, + ), + OpInfo( + "linalg.det", + aten_name="linalg_det", + op=torch.linalg.det, + variant_test_name="singular", + aliases=("det",), + dtypes=floating_and_complex_types(), + supports_forward_ad=True, + 
supports_fwgrad_bwgrad=True, + check_batched_gradgrad=False, + sample_inputs_func=sample_inputs_linalg_det_singular, + decorators=[skipCPUIfNoLapack, skipCUDAIfNoMagmaAndNoCusolver], + skips=( + DecorateInfo( + unittest.skip("The backward may give different results"), + "TestCommon", + "test_noncontiguous_samples", + ), + DecorateInfo( + unittest.skip("Gradients are incorrect on macos"), + "TestBwdGradients", + "test_fn_grad", + device_type="cpu", + dtypes=(torch.float64,), + active_if=IS_MACOS, + ), + DecorateInfo( + unittest.skip("Gradients are incorrect on macos"), + "TestFwdGradients", + "test_forward_mode_AD", + device_type="cpu", + dtypes=(torch.float64,), + active_if=IS_MACOS, + ), + # Both Hessians are incorrect on complex inputs?? + DecorateInfo( + unittest.expectedFailure, + "TestBwdGradients", + "test_fn_gradgrad", + dtypes=(torch.complex128,), + ), + DecorateInfo( + unittest.expectedFailure, + "TestFwdGradients", + "test_fn_fwgrad_bwgrad", + dtypes=(torch.complex128,), + ), + DecorateInfo( + unittest.skip("Skipped, see https://github.com//issues/84192"), + "TestBwdGradients", + "test_fn_gradgrad", + device_type="cuda", + ), + DecorateInfo( + unittest.skip("Skipped, see https://github.com//issues/84192"), + "TestFwdGradients", + "test_fn_fwgrad_bwgrad", + device_type="cuda", + ), + DecorateInfo( + unittest.skip( + "Flaky on ROCm https://github.com/pytorch/pytorch/issues/93044" + ), + "TestBwdGradients", + "test_fn_grad", + device_type="cuda", + dtypes=get_all_complex_dtypes(), + active_if=TEST_WITH_ROCM, + ), + DecorateInfo( + unittest.skip( + "Flaky on ROCm https://github.com/pytorch/pytorch/issues/93045" + ), + "TestFwdGradients", + "test_forward_mode_AD", + device_type="cuda", + dtypes=get_all_complex_dtypes(), + active_if=TEST_WITH_ROCM, + ), + ), + ), + OpInfo( + "linalg.diagonal", + aten_name="linalg_diagonal", + aten_backward_name="diagonal_backward", + dtypes=all_types_and_complex_and( + torch.bool, torch.bfloat16, torch.float16, torch.chalf + ), 
+ supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_diagonal_diag_embed, + error_inputs_func=error_inputs_diagonal_diag_embed, + ), + OpInfo( + "linalg.cholesky", + aten_name="linalg_cholesky", + dtypes=floating_and_complex_types(), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_linalg_cholesky, + gradcheck_wrapper=gradcheck_wrapper_hermitian_input, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], + ), + OpInfo( + "linalg.cholesky_ex", + aten_name="linalg_cholesky_ex", + dtypes=floating_and_complex_types(), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_linalg_cholesky, + gradcheck_wrapper=gradcheck_wrapper_hermitian_input, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], + ), + OpInfo( + "linalg.vecdot", + aten_name="linalg_vecdot", + ref=lambda x, y, *, dim=-1: (x.conj() * y).sum(dim), + dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_linalg_vecdot, + check_batched_forward_grad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 + DecorateInfo( + unittest.skip("Skipped!"), + "TestSchemaCheckModeOpInfo", + "test_schema_correctness", + dtypes=(torch.complex64, torch.complex128), + ), + DecorateInfo( + unittest.skip("Unsupported on MPS for now"), + "TestCommon", + "test_numpy_ref_mps", + ), + DecorateInfo( + toleranceOverride({torch.half: tol(atol=1.2e-2, rtol=1.7e-2)}), + "TestInductorOpInfo", + "test_comprehensive", + device_type="cuda", + ), + ), + ), + OpInfo( + "linalg.cond", + aten_name="linalg_cond", + 
dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_linalg_cond, + check_batched_gradgrad=False, + check_batched_forward_grad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, with_tf32_off], + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestFakeTensor", + "test_fake_crossref_backward_amp", + device_type="cuda", + dtypes=[torch.float32], + active_if=TEST_WITH_ROCM, + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestFakeTensor", + "test_fake_crossref_backward_no_amp", + device_type="cuda", + dtypes=[torch.float32], + active_if=TEST_WITH_ROCM, + ), + ), + ), + OpInfo( + "linalg.eig", + aten_name="linalg_eig", + op=torch.linalg.eig, + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_linalg_eig, + check_batched_forward_grad=False, + check_batched_grad=False, + check_batched_gradgrad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # AssertionError: Scalars are not equal! 
+ DecorateInfo( + unittest.expectedFailure, "TestCommon", "test_out", device_type="cpu" + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_out", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_variant_consistency_eager", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + device_type="mps", + dtypes=[torch.float32], + ), + ), + decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack, with_tf32_off], + ), + OpInfo( + "linalg.eigvals", + aten_name="linalg_eigvals", + op=torch.linalg.eigvals, + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_linalg_invertible, + check_batched_forward_grad=False, + check_batched_grad=False, + check_batched_gradgrad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack], + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_out", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_variant_consistency_eager", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + device_type="mps", + dtypes=[torch.float32], + ), + ), + ), + OpInfo( + "linalg.eigh", + aten_name="linalg_eigh", + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_linalg_eigh, + gradcheck_wrapper=gradcheck_wrapper_hermitian_input, + check_batched_forward_grad=False, + check_batched_grad=False, + check_batched_gradgrad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack, with_tf32_off], + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_out", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + 
unittest.skip("Skipped!"), + "TestCommon", + "test_variant_consistency_eager", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + device_type="mps", + dtypes=[torch.float32], + ), + ), + ), + OpInfo( + "linalg.eigvalsh", + aten_name="linalg_eigvalsh", + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_linalg_eigh, + gradcheck_wrapper=gradcheck_wrapper_hermitian_input, + check_batched_forward_grad=False, + check_batched_grad=False, + check_batched_gradgrad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack], + skips=( + # Pre-existing condition; Needs to be fixed + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_out", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_variant_consistency_eager", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + device_type="mps", + dtypes=[torch.float32], + ), + ), + ), + OpInfo( + "linalg.householder_product", + aten_name="linalg_householder_product", + op=torch.linalg.householder_product, + aliases=("orgqr",), + dtypes=floating_and_complex_types(), + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + # TODO: backward uses in-place operations that vmap doesn't like + check_batched_grad=False, + check_batched_gradgrad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_householder_product, + decorators=[ + skipCUDAIfNoCusolver, + skipCPUIfNoLapack, + DecorateInfo( + toleranceOverride({torch.complex64: tol(atol=1e-3, rtol=1e-3)}) + ), + DecorateInfo( + unittest.skip("Skipped! 
Flaky"), + "TestFwdGradients", + "test_fn_fwgrad_bwgrad", + device_type="cpu", + dtypes=(torch.complex128,), + ), + ], + ), + OpInfo( + "linalg.ldl_factor", + aten_name="linalg_ldl_factor", + dtypes=floating_and_complex_types(), + supports_autograd=False, + sample_inputs_func=sample_inputs_linalg_ldl_factor, + decorators=[skipCUDAIfNoMagmaAndNoLinalgsolver, skipCPUIfNoLapack], + ), + OpInfo( + "linalg.ldl_factor_ex", + aten_name="linalg_ldl_factor_ex", + dtypes=floating_and_complex_types(), + supports_autograd=False, + sample_inputs_func=sample_inputs_linalg_ldl_factor, + decorators=[skipCUDAIfNoMagmaAndNoLinalgsolver, skipCPUIfNoLapack], + ), + OpInfo( + "linalg.ldl_solve", + aten_name="linalg_ldl_solve", + dtypes=floating_and_complex_types(), + supports_autograd=False, + sample_inputs_func=sample_inputs_linalg_ldl_solve, + decorators=[ + skipCUDAIf( + _get_torch_cuda_version() < (11, 4), "not available before CUDA 11.3.1" + ), + skipCUDAIfNoCusolver, + skipCUDAIfRocm, + skipCPUIfNoLapack, + ], + ), + OpInfo( + "linalg.lstsq", + aten_name="linalg_lstsq", + dtypes=floating_and_complex_types(), + supports_out=True, + sample_inputs_func=sample_inputs_linalg_lstsq, + error_inputs_func=error_inputs_lstsq, + decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack], + skips=( + # we skip gradient checks for this suite as they are tested in + # variant_test_name='grad_oriented' + DecorateInfo(unittest.skip("Skipped!"), "TestFwdGradients"), + DecorateInfo(unittest.skip("Skipped!"), "TestBwdGradients"), + # The values for attribute 'shape' do not match + DecorateInfo(unittest.skip("Skipped!"), "TestCommon", "test_out"), + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_out", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_variant_consistency_eager", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", 
+ device_type="mps", + dtypes=[torch.float32], + ), + ), + ), + OpInfo( + "linalg.lstsq", + aten_name="linalg_lstsq", + variant_test_name="grad_oriented", + # gradchecks for forward AD fails with multi-Tensor outputs + op=lambda a, b, driver: torch.linalg.lstsq(a, b, driver=driver)[0], + supports_out=False, + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_linalg_lstsq, + error_inputs_func=error_inputs_lstsq_grad_oriented, + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_autograd=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack], + skips=( + # tests do not work with passing lambda for op + DecorateInfo( + unittest.expectedFailure, "TestJit", "test_variant_consistency_jit" + ), + DecorateInfo( + unittest.expectedFailure, + "TestOperatorSignatures", + "test_get_torch_func_signature_exhaustive", + ), + ), + ), + OpInfo( + "linalg.matrix_power", + aliases=("matrix_power",), + aten_name="linalg_matrix_power", + dtypes=floating_and_complex_types(), + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_inplace_autograd=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_grad=False, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, with_tf32_off], + sample_inputs_func=sample_inputs_linalg_matrix_power, + ), + OpInfo( + "linalg.multi_dot", + # Need this lambda because gradcheck does not work with TensorList inputs + aten_name="linalg_multi_dot", + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16), + supports_inplace_autograd=False, + # Batched grad checks fail for empty input tensors (see https://github.com/pytorch/pytorch/issues/53407) + check_batched_grad=False, + check_batched_gradgrad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # 
https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_linalg_multi_dot, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + skips=( + # https://github.com/pytorch/pytorch/issues/67470 + DecorateInfo( + unittest.skip("67470!"), "TestCommon", "test_noncontiguous_samples" + ), + # Fails on XLA. + # AssertionError: False is not true : Tensors failed to compare as equal! + DecorateInfo( + unittest.skip("Skipped!"), + "TestOpInfo", + device_type="xla", + dtypes=(torch.long,), + ), + # https://github.com/pytorch/pytorch/issues/71774 + DecorateInfo( + unittest.skip("Skipped!"), + "TestNNCOpInfo", + "test_nnc_correctness", + device_type="cpu", + dtypes=(torch.long,), + ), + ), + ), + # NB: linalg.norm has two variants so that different skips can be used for different sample inputs + OpInfo( + "linalg.norm", + aten_name="linalg_norm", + op=torch.linalg.norm, + dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, with_tf32_off], + sample_inputs_func=sample_inputs_linalg_norm, + supports_forward_ad=True, + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + skips=( + DecorateInfo( + unittest.expectedFailure, "TestBwdGradients", "test_fn_gradgrad" + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestFakeTensor", + "test_fake_crossref_backward_amp", + device_type="cuda", + dtypes=[torch.float32], + active_if=TEST_WITH_ROCM, + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestFakeTensor", + "test_fake_crossref_backward_no_amp", + device_type="cuda", + dtypes=[torch.float32], + active_if=TEST_WITH_ROCM, + ), + ), + ), + OpInfo( + "linalg.norm", + op=torch.linalg.norm, + variant_test_name="subgradients_at_zero", + dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, with_tf32_off], + sample_inputs_func=partial( + 
sample_inputs_linalg_norm, variant="subgradient_at_zero" + ), + aten_name="linalg_norm", + supports_forward_ad=True, + # torch.autograd.gradcheck.GradcheckError: While computing batched gradients, got: + # Could not allocate memory to change Tensor SizesAndStrides! + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + skips=( + # [NEW] Skips specifically for sample inputs at zero + # norm's vjp/jvp are not well-conditioned near zero + DecorateInfo( + unittest.expectedFailure, "TestBwdGradients", "test_fn_gradgrad" + ), + DecorateInfo( + unittest.expectedFailure, "TestFwdGradients", "test_fn_fwgrad_bwgrad" + ), + DecorateInfo( + unittest.expectedFailure, "TestFwdGradients", "test_forward_mode_AD" + ), + DecorateInfo(unittest.expectedFailure, "TestBwdGradients", "test_fn_grad"), + ), + ), + OpInfo( + "linalg.matrix_norm", + aten_name="linalg_matrix_norm", + dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), + supports_forward_ad=True, + check_batched_forward_grad=False, + check_batched_gradgrad=False, + supports_fwgrad_bwgrad=True, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, with_tf32_off], + sample_inputs_func=sample_inputs_linalg_matrix_norm, + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestFakeTensor", + "test_fake_crossref_backward_amp", + device_type="cuda", + dtypes=[torch.float32], + active_if=TEST_WITH_ROCM, + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestFakeTensor", + "test_fake_crossref_backward_no_amp", + device_type="cuda", + dtypes=[torch.float32], + active_if=TEST_WITH_ROCM, + ), + ), + ), + OpInfo( + "linalg.qr", + aten_name="linalg_qr", + op=torch.linalg.qr, + dtypes=floating_and_complex_types(), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # In-place ops + check_batched_gradgrad=False, + sample_inputs_func=sample_inputs_linalg_qr_geqrf, + decorators=[skipCUDAIfNoCusolver, skipCPUIfNoLapack], + ), + OpInfo( + "linalg.slogdet", + aten_name="linalg_slogdet", + 
op=torch.linalg.slogdet, + dtypes=floating_and_complex_types(), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_linalg_det_logdet_slogdet, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], + ), + OpInfo( + "linalg.vander", + aten_name="linalg_vander", + ref=np_vander_batched, + op=torch.linalg.vander, + dtypes=all_types_and_complex(), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + sample_inputs_func=sample_inputs_linalg_vander, + skips=( + DecorateInfo( + unittest.skip("Unsupported on MPS for now"), + "TestCommon", + "test_numpy_ref_mps", + ), + ), + ), + ReductionOpInfo( + "linalg.vector_norm", + op=torch.linalg.vector_norm, + identity=0, + nan_policy="propagate", + supports_multiple_dims=True, + complex_to_real=True, + supports_forward_ad=True, + # torch.autograd.gradcheck.GradcheckError: While computing batched gradients + # got: Could not allocate memory to change Tensor SizesAndStrides! 
+ check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), + generate_args_kwargs=sample_kwargs_vector_norm, + aten_name="linalg_vector_norm", + skips=( + # FIXME: sum reduces all dimensions when dim=[] + DecorateInfo(unittest.expectedFailure, "TestReductions", "test_dim_empty"), + DecorateInfo( + unittest.expectedFailure, "TestReductions", "test_dim_empty_keepdim" + ), + ), + ), + OpInfo( + "linalg.lu_factor", + aten_name="linalg_lu_factor", + op=torch.linalg.lu_factor, + dtypes=floating_and_complex_types(), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_linalg_lu, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], + skips=( + # linalg.lu_factor: LU without pivoting is not implemented on the CPU + DecorateInfo(unittest.expectedFailure, "TestCommon", "test_compare_cpu"), + ), + ), + OpInfo( + "linalg.lu_factor_ex", + aten_name="linalg_lu_factor_ex", + op=torch.linalg.lu_factor_ex, + dtypes=floating_and_complex_types(), + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_linalg_lu, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], + skips=( + # linalg.lu_factor: LU without pivoting is not implemented on the CPU + DecorateInfo(unittest.expectedFailure, "TestCommon", "test_compare_cpu"), + ), + ), + OpInfo( + "linalg.lu", + aten_name="linalg_lu", + op=torch.linalg.lu, + dtypes=floating_and_complex_types(), + # https://github.com/pytorch/pytorch/issues/80411 + # Runs very slowly on slow-gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + 
sample_inputs_func=sample_inputs_linalg_lu, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], + skips=( + # linalg.lu_factor: LU without pivoting is not implemented on the CPU + DecorateInfo(unittest.expectedFailure, "TestCommon", "test_compare_cpu"), + ), + ), + OpInfo( + "linalg.lu_solve", + op=torch.linalg.lu_solve, + aten_name="linalg_lu_solve", + dtypes=floating_and_complex_types(), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_lu_solve, + skips=( + DecorateInfo( + unittest.skip("Tests different backward paths"), + "TestCommon", + "test_floating_inputs_are_differentiable", + ), + ), + decorators=[skipCPUIfNoLapack, skipCUDAIfNoMagmaAndNoCusolver], + ), + OpInfo( + "linalg.inv", + aten_name="linalg_inv", + op=torch.linalg.inv, + aliases=("inverse",), + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_linalg_invertible, + check_batched_gradgrad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_out", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_variant_consistency_eager", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + device_type="mps", + dtypes=[torch.float32], + ), + ), + ), + OpInfo( + "linalg.inv_ex", + aten_name="linalg_inv_ex", + op=torch.linalg.inv_ex, + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_linalg_invertible, + check_batched_gradgrad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], + 
skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_out", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_variant_consistency_eager", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + device_type="mps", + dtypes=[torch.float32], + ), + ), + ), + OpInfo( + "linalg.solve", + aten_name="linalg_solve", + op=torch.linalg.solve, + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_linalg_solve, + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=[ + skipCUDAIfNoMagmaAndNoCusolver, + skipCPUIfNoLapack, + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1.3e-05, rtol=6e-04)}), + "TestCommon", + "test_noncontiguous_samples", + device_type="cpu", + ), + ], + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_out", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_variant_consistency_eager", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + device_type="mps", + dtypes=[torch.float32], + ), + ), + ), + OpInfo( + "linalg.solve_ex", + aten_name="linalg_solve_ex", + op=torch.linalg.solve_ex, + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_linalg_solve, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=[ + skipCUDAIfNoMagmaAndNoCusolver, + skipCPUIfNoLapack, + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1.3e-05, rtol=6e-04)}), + "TestCommon", + "test_noncontiguous_samples", + device_type="cpu", + ), + ], + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_out", + 
device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_variant_consistency_eager", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + device_type="mps", + dtypes=[torch.float32], + ), + ), + ), + OpInfo( + "linalg.solve_triangular", + aten_name="linalg_solve_triangular", + op=torch.linalg.solve_triangular, + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_linalg_solve_triangular, + supports_fwgrad_bwgrad=True, + skips=(skipCPUIfNoLapack,), + # linalg.solve_triangular cannot be batched over because of a call to out.copy_(result); + supports_forward_ad=True, + ), + OpInfo( + "linalg.matrix_rank", + aten_name="linalg_matrix_rank", + dtypes=floating_and_complex_types(), + supports_autograd=False, + sample_inputs_func=sample_inputs_matrix_rank, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_out", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_variant_consistency_eager", + device_type="mps", + dtypes=[torch.float32], + ), + # jit doesn't accept tensor inputs for matrix rank + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + dtypes=[torch.complex64, torch.float32], + ), + ), + ), + OpInfo( + "linalg.matrix_rank", + aten_name="linalg_matrix_rank", + variant_test_name="hermitian", + dtypes=floating_and_complex_types(), + supports_autograd=False, + sample_inputs_func=sample_inputs_linalg_pinv_hermitian, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_out", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + 
"test_variant_consistency_jit", + device_type="mps", + dtypes=[torch.float32], + ), + ), + ), + OpInfo( + "linalg.pinv", + aten_name="linalg_pinv", + op=torch.linalg.pinv, + dtypes=floating_and_complex_types(), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + check_batched_grad=False, + check_batched_gradgrad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_linalg_pinv, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], + skips=( + # errors with "leaked XXXX bytes CUDA memory on device 0" + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + device_type="cuda", + ), + ), + ), + OpInfo( + "linalg.pinv", + aten_name="linalg_pinv", + variant_test_name="singular", + # pinv is Frechet-differentiable in a rank-preserving neighborhood, + # so we feed inputs that are the products of two full-rank factors, + # to avoid any rank changes caused by the perturbations in the gradcheck + op=lambda a, b: torch.linalg.pinv(a @ b.mT), + dtypes=floating_and_complex_types(), + supports_out=False, + check_batched_grad=False, + check_batched_gradgrad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_linalg_pinv_singular, + # Only large tensors show issues with implicit backward used prior to + # explicit backward implementation. + decorators=[slowTest, skipCUDAIfNoCusolver, skipCPUIfNoLapack], + skips=( + DecorateInfo( + unittest.expectedFailure, "TestJit", "test_variant_consistency_jit" + ), + # CUDA runs out of memory + DecorateInfo( + unittest.skip("Skipped!"), + "TestFwdGradients", + "test_fn_fwgrad_bwgrad", + device_type="cuda", + dtypes=[torch.cdouble], + ), + # This test takes almost 2 hours to run! 
+ DecorateInfo( + unittest.skip("Skipped!"), + "TestBwdGradients", + "test_fn_gradgrad", + device_type="cuda", + dtypes=[torch.cdouble], + ), + ), + ), + OpInfo( + "linalg.pinv", + aten_name="linalg_pinv", + variant_test_name="hermitian", + dtypes=floating_and_complex_types(), + check_batched_grad=False, + check_batched_gradgrad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_linalg_pinv_hermitian, + gradcheck_wrapper=gradcheck_wrapper_hermitian_input, + decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack], + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_out", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_variant_consistency_eager", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1e-5, rtol=1e-5)}), + "TestCommon", + "test_noncontiguous_samples", + device_type="cuda", + ), + # This test is flaky under slow gradcheck, likely due to rounding issues + DecorateInfo( + skipIfSlowGradcheckEnv, + "TestFwdGradients", + "test_fn_fwgrad_bwgrad", + device_type="cuda", + ), + ), + ), + OpInfo( + "linalg.svd", + op=torch.linalg.svd, + aten_name="linalg_svd", + decomp_aten_name="_linalg_svd", + dtypes=floating_and_complex_types(), + # Runs very slowly on slow-gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_fwgrad_bwgrad=True, + supports_forward_ad=True, + check_batched_forward_grad=False, + # We're using at::allclose, which does not have a batching rule + check_batched_grad=False, + check_batched_gradgrad=False, + sample_inputs_func=sample_inputs_svd, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, 
skipCPUIfNoLapack, with_tf32_off], + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_out", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestCommon", + "test_variant_consistency_eager", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + device_type="mps", + dtypes=[torch.float32], + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestFakeTensor", + "test_fake_crossref_backward_amp", + device_type="cuda", + dtypes=[torch.float32], + active_if=TEST_WITH_ROCM, + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestFakeTensor", + "test_fake_crossref_backward_no_amp", + device_type="cuda", + dtypes=[torch.float32], + active_if=TEST_WITH_ROCM, + ), + ), + ), + OpInfo( + "linalg.svdvals", + op=torch.linalg.svdvals, + aten_name="linalg_svdvals", + decomp_aten_name="_linalg_svd", + dtypes=floating_and_complex_types(), + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + supports_forward_ad=True, + # We're using at::allclose, which does not have a batching rule + check_batched_gradgrad=False, + sample_inputs_func=sample_inputs_linalg_svdvals, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, with_tf32_off], + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestFakeTensor", + "test_fake_crossref_backward_amp", + device_type="cuda", + dtypes=[torch.float32], + active_if=TEST_WITH_ROCM, + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestFakeTensor", + "test_fake_crossref_backward_no_amp", + device_type="cuda", + dtypes=[torch.float32], + active_if=TEST_WITH_ROCM, + ), + ), + ), + OpInfo( + "linalg.tensorinv", + ref=np.linalg.tensorinv, + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_tensorinv, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + 
check_batched_forward_grad=False, + decorators=[skipCPUIfNoLapack, skipCUDAIfNoMagmaAndNoCusolver], + skips=( + DecorateInfo( + unittest.skip("Unsupported on MPS for now"), + "TestCommon", + "test_numpy_ref_mps", + ), + ), + ), + OpInfo( + "linalg.tensorsolve", + ref=lambda a, b, dims=None: np.linalg.tensorsolve(a, b, axes=dims), + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_tensorsolve, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=[ + skipCUDAIfNoMagmaAndNoCusolver, + skipCPUIfNoLapack, + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1e-03, rtol=1e-03)}), + "TestCommon", + "test_noncontiguous_samples", + device_type="cuda", + ), + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=8e-04, rtol=7e-06)}), + "TestCommon", + "test_noncontiguous_samples", + device_type="cpu", + ), + ], + skips=( + DecorateInfo( + unittest.skip("Unsupported on MPS for now"), + "TestCommon", + "test_numpy_ref_mps", + ), + ), + ), +] + +python_ref_db: List[OpInfo] = [ + # + # torch.linalg + # + PythonRefInfo( + "_refs.linalg.cross", + torch_opinfo_name="linalg.cross", + supports_out=True, + op_db=op_db, + skips=( + # TODO: is this really needed? 
+ DecorateInfo( + unittest.expectedFailure, "TestCommon", "test_python_ref_errors" + ), + ), + ), + PythonRefInfo( + "_refs.linalg.diagonal", + torch_opinfo_name="linalg.diagonal", + supports_out=False, + op_db=op_db, + ), + PythonRefInfo( + "_refs.linalg.vecdot", + torch_opinfo_name="linalg.vecdot", + op_db=op_db, + ), + ReductionPythonRefInfo( + "_refs.linalg.vector_norm", + torch_opinfo_name="linalg.vector_norm", + supports_out=True, + op_db=op_db, + skips=( + # FIXME: sum reduces all dimensions when dim=[] + DecorateInfo(unittest.expectedFailure, "TestReductions", "test_dim_empty"), + DecorateInfo( + unittest.expectedFailure, "TestReductions", "test_dim_empty_keepdim" + ), + ), + ), + PythonRefInfo( + "_refs.linalg.matrix_norm", + torch_opinfo_name="linalg.matrix_norm", + supports_out=True, + # Uses vector_norm inside and vector_norm is affected by + # https://github.com/pytorch/pytorch/issues/77216 + validate_view_consistency=False, + op_db=op_db, + ), + PythonRefInfo( + "_refs.linalg.norm", + torch_opinfo_name="linalg.norm", + supports_out=True, + # Uses vector_norm inside and vector_norm is affected by + # https://github.com/pytorch/pytorch/issues/77216 + validate_view_consistency=False, + op_db=op_db, + ), + PythonRefInfo( + "_refs.linalg.svd", + torch_opinfo_name="linalg.svd", + supports_out=True, + op_db=op_db, + ), + PythonRefInfo( + "_refs.linalg.svdvals", + torch_opinfo_name="linalg.svdvals", + supports_out=True, + op_db=op_db, + ), +] diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/nested.py b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/nested.py new file mode 100644 index 0000000000000000000000000000000000000000..ea678c2e4f87508f08833c13ce55873032bcb5bf --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/nested.py @@ -0,0 +1,305 @@ +# mypy: ignore-errors + +from copy import copy +from functools import partial 
+ +import torch +from torch.testing._internal.common_methods_invocations import op_db +from torch.testing._internal.opinfo.core import ( + BinaryUfuncInfo, + ReductionOpInfo, + SampleInput, + UnaryUfuncInfo, +) +from torch.utils._pytree import tree_map + + +# random integer used for sizes +def _rnd(): + return torch.randint(3, 8, ()).item() + + +def _raggedness_matches(nt1, nt2): + return ( + nt1.is_nested + and nt2.is_nested + and nt1._ragged_idx == nt2._ragged_idx + and nt1.shape[nt1._ragged_idx] == nt2.shape[nt2._ragged_idx] + ) + + +# Generates a random NT. +# dims should be something like [5, None, 10], with None indicating that a +# random ragged structure should be used +def random_nt_from_dims( + dims, device=None, dtype=None, layout=torch.strided, requires_grad=False +): + sizes = [[d if d is not None else _rnd() for d in dims[1:]] for d in range(dims[0])] + return torch.nested.nested_tensor( + [torch.randn(*size) for size in sizes], + device=device, + dtype=dtype, + layout=layout, + requires_grad=requires_grad, + ) + + +# Helper function for generating a comprehensive set of NJT sample inputs. +def _sample_njts(device, dtype, requires_grad=False, dims=None): + if dims is None: + dims = [2, 3, 4] + if not isinstance(dims, (list, tuple)): + dims = [dims] + + # contiguous NJTs + for dim in dims: + # with min / max seqlen cached + shape = (_rnd(), None, *[_rnd() for _ in range(dim - 2)]) + nt = random_nt_from_dims( + shape, + device=device, + dtype=dtype, + requires_grad=requires_grad, + layout=torch.jagged, + ) + yield nt + + # without min / max seqlen cached + values = nt.values().clone().detach() + offsets = nt.offsets().clone().detach() + yield torch.nested.nested_tensor_from_jagged(values, offsets) + + # TODO: add non-contiguous NJTs + + +# Computes an unbind-based reference for a given OpInfo on a given SampleInput. +# This reference unbinds the input NJT and invokes the op on each of the components, +# optionally wrapping the result in an NJT. 
def unbind_reference(op, sample, wrap_output_as_njt=True):
    """Unbind-based reference: run ``op`` on each component of the input NJT.

    Any NJT arg/kwarg sharing the input's ragged structure is sliced to the
    matching component; a ``dim`` kwarg is shifted down to account for the
    unbound batch dim. Results are re-wrapped as an NJT unless
    ``wrap_output_as_njt`` is False.
    """
    from torch._prims_common import canonicalize_dims

    assert sample.input.is_nested
    njt_input = sample.input
    results = []
    for idx, component in enumerate(njt_input.unbind(dim=0)):

        def _maybe_slice(t, idx=idx, inp=njt_input):
            # any NJT with the same ragged structure as the input should
            # also be sliced to pass to the reference
            if isinstance(t, torch.Tensor) and _raggedness_matches(t, inp):
                return t[idx]
            return t

        comp_args = tree_map(_maybe_slice, sample.args)
        comp_kwargs = tree_map(_maybe_slice, sample.kwargs)

        # Need to adjust dim to apply on NJT component
        if "dim" in comp_kwargs:
            new_dim = canonicalize_dims(njt_input.dim(), comp_kwargs["dim"]) - 1
            assert new_dim >= 0
            comp_kwargs["dim"] = new_dim

        # TODO: handle this
        assert "dims" not in comp_kwargs

        out = op.op(component, *comp_args, **comp_kwargs)

        # TODO: handle list / tuple / non-NJT outputs
        assert not isinstance(out, (list, tuple))
        results.append(out)

    if not wrap_output_as_njt:
        return results
    return torch.nested.as_nested_tensor(results, layout=torch.jagged)


# Computes the reference value for a reduction op.
def reduction_reference(op, sample):
    """Compute a reference value for reduction ``op`` applied to NJT ``sample``.

    Dispatch on the ``dim`` kwarg:
    * no ``dim``: reduce the raw values buffer (full reduction),
    * ``dim`` == ragged dim: unbind, reduce each component, then stack,
    * otherwise: the generic unbind reference handles it.
    """
    assert sample.input.is_nested
    dim = sample.kwargs.get("dim", None)
    assert dim != 0, "reductions over the batch dim are not supported"
    assert "dims" not in sample.kwargs
    assert sample.input._ragged_idx == 1

    if dim is None:
        # calculate reference value by running reduction on values buffer
        return op.op(sample.input.values(), *sample.args, **sample.kwargs)

    if dim == sample.input._ragged_idx:
        # calculate reference value by running an unbind reference and stacking
        out_ref_components = unbind_reference(op, sample, wrap_output_as_njt=False)
        return torch.stack(out_ref_components, dim=0)

    # unbind reference works for other reductions
    return unbind_reference(op, sample)


def sample_inputs_elementwise_njt_unary(
    op_info, device, dtype, requires_grad, op_kwargs=None, **kwargs
):
    """Yield NJT samples for a unary elementwise op; ``op_kwargs`` are forwarded."""
    if not op_kwargs:
        op_kwargs = {}

    for njt in _sample_njts(
        device=device, dtype=dtype, requires_grad=requires_grad, dims=[2, 3, 4]
    ):
        yield SampleInput(njt, kwargs=dict(op_kwargs))


def sample_inputs_elementwise_njt_binary(
    op_info, device, dtype, requires_grad, op_kwargs=None, **kwargs
):
    """Yield (NJT, NJT) samples for a binary elementwise op; the second operand
    mirrors the first's ragged structure via ``randn_like``."""
    if not op_kwargs:
        op_kwargs = {}

    for njt1 in _sample_njts(
        device=device, dtype=dtype, requires_grad=requires_grad, dims=[2, 3, 4]
    ):
        # TODO: account for non-contiguous NJTs here
        # TODO: provide sample inputs for broadcasting cases and mixed (NT, T), (T, NT) inputs
        njt2 = torch.randn_like(njt1)
        yield SampleInput(njt1, args=(njt2,), kwargs=dict(op_kwargs))


def sample_inputs_njt_reduction(
    op_info, device, dtype, requires_grad, op_kwargs=None, **kwargs
):
    """Yield NJT reduction samples: every non-batch dim, with and without
    keepdim, plus a full reduction per NJT."""
    if not op_kwargs:
        op_kwargs = {}

    for njt in _sample_njts(
        device=device, dtype=dtype, requires_grad=requires_grad, dims=[2, 3, 4]
    ):
        # dim-wise reduction; includes reduction over the ragged dim
        # NB: reduction over the batch dim is not supported!
        # TODO: Cover this in the set of error inputs
        for dim in range(1, njt.dim()):
            for keepdim in [False, True]:
                yield SampleInput(
                    njt, kwargs={**op_kwargs, "dim": dim, "keepdim": keepdim}
                )

        # full reduction
        yield SampleInput(njt, kwargs=dict(op_kwargs))


def unsupported_sample_inputs_func(op_name):
    """Return a sample_inputs_func stub that raises for ops without NJT support."""

    def _f(op_info, device, dtype, requires_grad, op_name=op_name, **kwargs):
        raise RuntimeError(
            f"OpInfo for {op_name} does not support NJT. Support can be added by modifying "
            "torch/testing/_internal/opinfo/definitions/nested.py."
        )

    return _f


def unsupported_reference(op_name):
    """Return a ref() stub that raises for ops without an NJT reference."""

    def _f(op, sample):
        raise RuntimeError(
            f"OpInfo for {op_name} does not define a ref() function. Support can be added by "
            "modifying torch/testing/_internal/opinfo/definitions/nested.py."
        )

    return _f


# === BEGIN OP-SPECIFIC SAMPLE INPUTS FUNCS ===
def sample_inputs_clone(op_info, device, dtype, requires_grad, **kwargs):
    """Samples for clone(): standard NJTs plus "non-contiguous with holes" NJTs
    under both supported memory formats."""
    # non-contiguous NJTs
    for njt in _sample_njts(
        device=device, dtype=dtype, requires_grad=requires_grad, dims=[2, 3, 4]
    ):
        yield SampleInput(njt)

    for memory_format in (torch.contiguous_format, torch.preserve_format):
        # construct a "non-contiguous with holes" NJT (lengths < offset spans)
        values = torch.randn(
            10, 5, device=device, dtype=dtype, requires_grad=requires_grad
        )
        offsets = torch.tensor([0, 2, 4, 10], device=device, dtype=torch.int64)
        lengths = torch.tensor([2, 1, 3], device=device, dtype=torch.int64)
        njt = torch.nested.nested_tensor_from_jagged(
            values, offsets=offsets, lengths=lengths
        )

        yield SampleInput(njt, kwargs={"memory_format": memory_format})


def sample_inputs_mvl_gamma(p):
    """Sample-inputs factory for mvlgamma with order ``p`` baked in."""
    return partial(sample_inputs_elementwise_njt_unary, op_kwargs={"p": p})


def sample_inputs_polygamma_n(n):
    """Sample-inputs factory for polygamma with order ``n`` baked in."""
    return partial(sample_inputs_elementwise_njt_unary, op_kwargs={"n": n})


def sample_inputs_special_polygamma_n(n):
    """Sample-inputs factory for special.polygamma with order ``n`` baked in."""
    return partial(sample_inputs_elementwise_njt_unary, op_kwargs={"n": n})


def sample_inputs_masked_select(
    op_info, device, dtype, requires_grad, op_kwargs=None, **kwargs
):
    """Samples for masked_select(): 2D NJTs with a random boolean mask of the
    same ragged structure."""
    for njt in _sample_njts(
        device=device, dtype=dtype, requires_grad=requires_grad, dims=[2]
    ):
        yield SampleInput(
            njt, kwargs={"mask": (torch.randn_like(njt, requires_grad=False) < 0.0)}
        )


sample_inputs_nn_functional_threshold = partial(
    sample_inputs_elementwise_njt_unary,
    op_kwargs={"threshold": float.fromhex("0x1.3ap-3"), "value": -9},
)
# === END OP-SPECIFIC SAMPLE INPUTS FUNCS ===


# Mapping of OpInfo full names -> sample_inputs_funcs, which define the set of sample inputs
# (involving NJTs) to pass to the op. Full name consists of the OpInfo's name and variant name
# separated by a period (e.g. special.polygamma.special_polygamma_n_0). These are necessary
# to specify if they cannot be auto-generated for some reason. Try to keep these sorted
# in alphabetical order!
njt_sample_inputs = {
    "clone": sample_inputs_clone,
    # BUGFIX: bind each variant's own p (was hard-coded to p=1 for all three
    # entries, so the p_3 / p_5 variants were sampled with the wrong order).
    **{f"mvlgamma.mvlgamma_p_{p}": sample_inputs_mvl_gamma(p=p) for p in (1, 3, 5)},
    "nn.functional.threshold": sample_inputs_nn_functional_threshold,
    **{f"polygamma.polygamma_n_{n}": sample_inputs_polygamma_n(n=n) for n in range(5)},
    "special.polygamma.special_polygamma_n_0": sample_inputs_special_polygamma_n(n=0),
    "masked_select": sample_inputs_masked_select,
}


# Translates an OpInfo entry to one that operates on NJTs.
def translate_opinfo(op):
    """Translate an eager-mode OpInfo entry into one that operates on NJTs.

    Shallow-copies ``op``, marks it as supporting NJTs, and attaches an
    NJT-aware ``sample_inputs_func`` plus a reference implementation:
    explicit per-op entries from ``njt_sample_inputs`` take priority;
    otherwise unary / binary ufuncs and reductions get generic handlers.
    Anything else gets raising stubs and ``supports_njt = False``.
    """
    new_op = copy(op)
    new_op.supports_njt = True

    if op.full_name in njt_sample_inputs:
        new_op.sample_inputs_func = njt_sample_inputs[op.full_name]
        # TODO: make the reference customizeable
        new_op.ref = unbind_reference
    elif isinstance(op, UnaryUfuncInfo):
        new_op.sample_inputs_func = partial(
            sample_inputs_elementwise_njt_unary, op_kwargs=None
        )
        new_op.ref = unbind_reference
    elif isinstance(op, BinaryUfuncInfo):
        new_op.sample_inputs_func = partial(
            sample_inputs_elementwise_njt_binary, op_kwargs=None
        )
        new_op.ref = unbind_reference
    elif isinstance(op, ReductionOpInfo):
        new_op.sample_inputs_func = partial(sample_inputs_njt_reduction, op_kwargs=None)
        new_op.ref = reduction_reference
    # TODO: Translate the rest of the OpInfos
    else:
        new_op.sample_inputs_func = unsupported_sample_inputs_func(op.full_name)
        new_op.ref = unsupported_reference(op.full_name)
        new_op.supports_njt = False

    return new_op


# NJT-ified version of the common op_db.
njt_op_db = [translate_opinfo(op) for op in op_db]
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/signal.py b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/signal.py
new file mode 100644
index 0000000000000000000000000000000000000000..105590a71fb7d972bcbdfc2bfaaf3f59fbc4e335
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/signal.py
@@ -0,0 +1,458 @@
# mypy: ignore-errors

import unittest
from functools import partial
from itertools import product
from typing import Callable, List, Tuple

import numpy

import torch
from torch.testing._internal.common_dtype import floating_types
from torch.testing._internal.common_utils import TEST_SCIPY
from torch.testing._internal.opinfo.core import (
    DecorateInfo,
    ErrorInput,
    OpInfo,
    SampleInput,
)


# scipy is an optional test dependency; window references are only attached
# when it is available (see make_signal_windows_opinfo below).
if TEST_SCIPY:
    import scipy.signal


def sample_inputs_window(op_info, device, dtype,
requires_grad, *args, **kwargs):
    r"""Base function used to create sample inputs for windows.

    For additional required args you should use *args, as well as **kwargs for
    additional keyword arguments.
    """

    # Tests window sizes up to 5 samples.
    for size, sym in product(range(6), (True, False)):
        yield SampleInput(
            size,
            *args,
            sym=sym,
            device=device,
            dtype=dtype,
            requires_grad=requires_grad,
            **kwargs,
        )


def reference_inputs_window(op_info, device, dtype, requires_grad, *args, **kwargs):
    r"""Reference inputs function to use for windows which have a common signature, i.e.,
    window size and sym only.

    Implement other special functions for windows that have a specific signature.
    See exponential and gaussian windows for instance.
    """
    yield from sample_inputs_window(
        op_info, device, dtype, requires_grad, *args, **kwargs
    )

    # Larger power-of-two sizes, both periodic (sym=False) and symmetric.
    cases = (8, 16, 32, 64, 128, 256)

    for size in cases:
        yield SampleInput(size, sym=False)
        yield SampleInput(size, sym=True)


def reference_inputs_exponential_window(
    op_info, device, dtype, requires_grad, **kwargs
):
    r"""Reference inputs for the exponential window: varied (center, tau) pairs."""
    yield from sample_inputs_window(op_info, device, dtype, requires_grad, **kwargs)

    cases = (
        (8, {"center": 4, "tau": 0.5}),
        (16, {"center": 8, "tau": 2.5}),
        (32, {"center": 16, "tau": 43.5}),
        (64, {"center": 20, "tau": 3.7}),
        (128, {"center": 62, "tau": 99}),
        (256, {"tau": 10}),
    )

    for size, kw in cases:
        yield SampleInput(size, sym=False, **kw)
        # symmetric windows require center=None; mutating kw here is safe
        # because `cases` is rebuilt on every call
        kw["center"] = None
        yield SampleInput(size, sym=True, **kw)


def reference_inputs_gaussian_window(op_info, device, dtype, requires_grad, **kwargs):
    r"""Reference inputs for the gaussian window: varied std values."""
    yield from sample_inputs_window(op_info, device, dtype, requires_grad, **kwargs)

    cases = (
        (8, {"std": 0.1}),
        (16, {"std": 1.2}),
        (32, {"std": 2.1}),
        (64, {"std": 3.9}),
        (128, {"std": 4.5}),
        (256, {"std": 10}),
    )

    for size, kw in cases:
        yield SampleInput(size, sym=False, **kw)
        yield SampleInput(size, sym=True, **kw)


def reference_inputs_kaiser_window(op_info, device, dtype, requires_grad, **kwargs):
    r"""Reference inputs for the kaiser window: varied beta values."""
    yield from sample_inputs_window(op_info, device, dtype, requires_grad, **kwargs)

    cases = (
        (8, {"beta": 2}),
        (16, {"beta": 12}),
        (32, {"beta": 30}),
        (64, {"beta": 35}),
        (128, {"beta": 41.2}),
        (256, {"beta": 100}),
    )

    for size, kw in cases:
        yield SampleInput(size, sym=False, **kw)
        yield SampleInput(size, sym=True, **kw)


def reference_inputs_general_cosine_window(
    op_info, device, dtype, requires_grad, **kwargs
):
    r"""Reference inputs for the general cosine window: varied coefficient lists."""
    yield from sample_inputs_window(op_info, device, dtype, requires_grad, **kwargs)

    cases = (
        (8, {"a": [0.5, 0.5]}),
        (16, {"a": [0.46, 0.54]}),
        (32, {"a": [0.46, 0.23, 0.31]}),
        (64, {"a": [0.5]}),
        (128, {"a": [0.1, 0.8, 0.05, 0.05]}),
        (256, {"a": [0.2, 0.2, 0.2, 0.2, 0.2]}),
    )

    for size, kw in cases:
        yield SampleInput(size, sym=False, **kw)
        yield SampleInput(size, sym=True, **kw)


def reference_inputs_general_hamming_window(
    op_info, device, dtype, requires_grad, **kwargs
):
    r"""Reference inputs for the general hamming window: varied alpha values."""
    yield from sample_inputs_window(op_info, device, dtype, requires_grad, **kwargs)

    cases = (
        (8, {"alpha": 0.54}),
        (16, {"alpha": 0.5}),
        (32, {"alpha": 0.23}),
        (64, {"alpha": 0.8}),
        (128, {"alpha": 0.9}),
        (256, {"alpha": 0.05}),
    )

    for size, kw in cases:
        yield SampleInput(size, sym=False, **kw)
        yield SampleInput(size, sym=True, **kw)


def error_inputs_window(op_info, device, *args, **kwargs):
    r"""Common error inputs shared by all window ops: bad size, layout, dtype."""
    # Tests for windows that have a negative size
    yield ErrorInput(
        SampleInput(-1, *args, dtype=torch.float32, device=device, **kwargs),
        error_type=ValueError,
        error_regex="requires non-negative window length, got M=-1",
    )

    # Tests for window tensors that are not torch.strided, for instance, torch.sparse_coo.
    yield ErrorInput(
        SampleInput(
            3,
            *args,
            layout=torch.sparse_coo,
            device=device,
            dtype=torch.float32,
            **kwargs,
        ),
        error_type=ValueError,
        error_regex="is implemented for strided tensors only, got: torch.sparse_coo",
    )

    # Tests for window tensors that are not floating point dtypes, for instance, torch.long.
    yield ErrorInput(
        SampleInput(3, *args, dtype=torch.long, device=device, **kwargs),
        error_type=ValueError,
        error_regex="expects float32 or float64 dtypes, got: torch.int64",
    )

    # Tests for window tensors that are bfloat16
    yield ErrorInput(
        SampleInput(3, *args, dtype=torch.bfloat16, device=device, **kwargs),
        error_type=ValueError,
        error_regex="expects float32 or float64 dtypes, got: torch.bfloat16",
    )

    # Tests for window tensors that are float16
    yield ErrorInput(
        SampleInput(3, *args, dtype=torch.float16, device=device, **kwargs),
        error_type=ValueError,
        error_regex="expects float32 or float64 dtypes, got: torch.float16",
    )


def error_inputs_exponential_window(op_info, device, **kwargs):
    # Yield common error inputs
    yield from error_inputs_window(op_info, device, **kwargs)

    # Tests for negative decay values.
    yield ErrorInput(
        SampleInput(3, tau=-1, dtype=torch.float32, device=device, **kwargs),
        error_type=ValueError,
        error_regex="Tau must be positive, got: -1 instead.",
    )

    # Tests for symmetric windows and a given center value.
    yield ErrorInput(
        SampleInput(3, center=1, sym=True, dtype=torch.float32, device=device),
        error_type=ValueError,
        error_regex="Center must be None for symmetric windows",
    )


def error_inputs_gaussian_window(op_info, device, **kwargs):
    # Yield common error inputs
    yield from error_inputs_window(op_info, device, std=0.5, **kwargs)

    # Tests for negative standard deviations
    yield ErrorInput(
        SampleInput(3, std=-1, dtype=torch.float32, device=device, **kwargs),
        error_type=ValueError,
        error_regex="Standard deviation must be positive, got: -1 instead.",
    )


def error_inputs_kaiser_window(op_info, device, **kwargs):
    # Yield common error inputs
    yield from error_inputs_window(op_info, device, beta=12, **kwargs)

    # Tests for negative beta
    yield ErrorInput(
        SampleInput(3, beta=-1, dtype=torch.float32, device=device, **kwargs),
        error_type=ValueError,
        error_regex="beta must be non-negative, got: -1 instead.",
    )


def error_inputs_general_cosine_window(op_info, device, **kwargs):
    # Yield common error inputs
    yield from error_inputs_window(op_info, device, a=[0.54, 0.46], **kwargs)

    # Tests for invalid coefficient arguments `a` (None and empty)
    yield ErrorInput(
        SampleInput(3, a=None, dtype=torch.float32, device=device, **kwargs),
        error_type=TypeError,
        error_regex="Coefficients must be a list/tuple",
    )

    yield ErrorInput(
        SampleInput(3, a=[], dtype=torch.float32, device=device, **kwargs),
        error_type=ValueError,
        error_regex="Coefficients cannot be empty",
    )


def reference_signal_window(fn: Callable):
    r"""Wrapper for scipy signal window references.

    Discards keyword arguments for window reference functions that don't have a matching signature with
    torch, e.g., gaussian window.
    """

    def _fn(
        *args,
        dtype=numpy.float64,
        device=None,
        layout=torch.strided,
        requires_grad=False,
        **kwargs,
    ):
        r"""The unused arguments are defined to disregard those values"""
        return fn(*args, **kwargs).astype(dtype)

    return _fn


def make_signal_windows_opinfo(
    name: str,
    ref: Callable,
    sample_inputs_func: Callable,
    reference_inputs_func: Callable,
    error_inputs_func: Callable,
    *,
    skips: Tuple[DecorateInfo, ...] = (),
):
    r"""Helper function to create OpInfo objects related to different windows."""
    return OpInfo(
        name=name,
        ref=ref if TEST_SCIPY else None,
        dtypes=floating_types(),
        dtypesIfCUDA=floating_types(),
        sample_inputs_func=sample_inputs_func,
        reference_inputs_func=reference_inputs_func,
        error_inputs_func=error_inputs_func,
        supports_out=False,
        supports_autograd=False,
        skips=(
            # TODO: same as this?
            # https://github.com/pytorch/pytorch/issues/81774
            # also see: arange, new_full
            # fails to match any schemas despite working in the interpreter
            DecorateInfo(
                unittest.expectedFailure,
                "TestOperatorSignatures",
                "test_get_torch_func_signature_exhaustive",
            ),
            # fails to match any schemas despite working in the interpreter
            DecorateInfo(
                unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"
            ),
            # skip these tests since we have non tensor input
            DecorateInfo(
                unittest.skip("Skipped!"), "TestCommon", "test_noncontiguous_samples"
            ),
            DecorateInfo(
                unittest.skip("Skipped!"),
                "TestCommon",
                "test_variant_consistency_eager",
            ),
            DecorateInfo(unittest.skip("Skipped!"), "TestMathBits", "test_conj_view"),
            DecorateInfo(
                unittest.skip("Skipped!"), "TestMathBits", "test_neg_conj_view"
            ),
            DecorateInfo(unittest.skip("Skipped!"), "TestMathBits", "test_neg_view"),
            DecorateInfo(
                unittest.skip("Skipped!"),
                "TestVmapOperatorsOpInfo",
                "test_vmap_exhaustive",
            ),
            DecorateInfo(
                unittest.skip("Skipped!"),
                "TestVmapOperatorsOpInfo",
                "test_op_has_batch_rule",
            ),
            DecorateInfo(
                unittest.skip("Buggy on MPS for now (mistakenly promotes to float64)"),
                "TestCommon",
                "test_numpy_ref_mps",
            ),
            *skips,
        ),
    )


op_db: List[OpInfo] = [
    make_signal_windows_opinfo(
        name="signal.windows.hamming",
        ref=reference_signal_window(scipy.signal.windows.hamming)
        if TEST_SCIPY
        else None,
        sample_inputs_func=sample_inputs_window,
        reference_inputs_func=reference_inputs_window,
        error_inputs_func=error_inputs_window,
    ),
    make_signal_windows_opinfo(
        name="signal.windows.hann",
        ref=reference_signal_window(scipy.signal.windows.hann) if TEST_SCIPY else None,
        sample_inputs_func=sample_inputs_window,
        reference_inputs_func=reference_inputs_window,
        error_inputs_func=error_inputs_window,
    ),
    make_signal_windows_opinfo(
        name="signal.windows.bartlett",
        ref=reference_signal_window(scipy.signal.windows.bartlett)
        if TEST_SCIPY
        else None,
        sample_inputs_func=sample_inputs_window,
        reference_inputs_func=reference_inputs_window,
        error_inputs_func=error_inputs_window,
    ),
    make_signal_windows_opinfo(
        name="signal.windows.blackman",
        ref=reference_signal_window(scipy.signal.windows.blackman)
        if TEST_SCIPY
        else None,
        sample_inputs_func=sample_inputs_window,
        reference_inputs_func=reference_inputs_window,
        error_inputs_func=error_inputs_window,
    ),
    make_signal_windows_opinfo(
        name="signal.windows.cosine",
        ref=reference_signal_window(scipy.signal.windows.cosine)
        if TEST_SCIPY
        else None,
        sample_inputs_func=sample_inputs_window,
        reference_inputs_func=reference_inputs_window,
        error_inputs_func=error_inputs_window,
    ),
    make_signal_windows_opinfo(
        name="signal.windows.exponential",
        ref=reference_signal_window(scipy.signal.windows.exponential)
        if TEST_SCIPY
        else None,
        sample_inputs_func=partial(sample_inputs_window, tau=2.78),
        reference_inputs_func=partial(reference_inputs_exponential_window, tau=2.78),
        error_inputs_func=error_inputs_exponential_window,
    ),
    make_signal_windows_opinfo(
        name="signal.windows.gaussian",
        ref=reference_signal_window(scipy.signal.windows.gaussian)
        if TEST_SCIPY
        else None,
        sample_inputs_func=partial(sample_inputs_window, std=1.92),
        reference_inputs_func=partial(reference_inputs_gaussian_window, std=1.92),
        error_inputs_func=error_inputs_gaussian_window,
        # NOTE(review): this skip duplicates the default MPS skip already added
        # by make_signal_windows_opinfo; harmless but redundant.
        skips=(
            DecorateInfo(
                unittest.skip("Buggy on MPS for now (mistakenly promotes to float64)"),
                "TestCommon",
                "test_numpy_ref_mps",
            ),
        ),
    ),
    make_signal_windows_opinfo(
        name="signal.windows.kaiser",
        ref=reference_signal_window(scipy.signal.windows.kaiser)
        if TEST_SCIPY
        else None,
        sample_inputs_func=partial(sample_inputs_window, beta=12.0),
        reference_inputs_func=partial(reference_inputs_kaiser_window, beta=12.0),
        error_inputs_func=error_inputs_kaiser_window,
    ),
    make_signal_windows_opinfo(
        name="signal.windows.general_cosine",
        ref=reference_signal_window(scipy.signal.windows.general_cosine)
        if TEST_SCIPY
        else None,
        sample_inputs_func=partial(sample_inputs_window, a=[0.54, 0.46]),
        reference_inputs_func=partial(
            reference_inputs_general_cosine_window, a=[0.54, 0.46]
        ),
        error_inputs_func=error_inputs_general_cosine_window,
    ),
    make_signal_windows_opinfo(
        name="signal.windows.general_hamming",
        ref=reference_signal_window(scipy.signal.windows.general_hamming)
        if TEST_SCIPY
        else None,
        sample_inputs_func=partial(sample_inputs_window, alpha=0.54),
        reference_inputs_func=partial(
            reference_inputs_general_hamming_window, alpha=0.54
        ),
        error_inputs_func=error_inputs_window,
    ),
    make_signal_windows_opinfo(
        name="signal.windows.nuttall",
        ref=reference_signal_window(scipy.signal.windows.nuttall)
        if TEST_SCIPY
        else None,
        sample_inputs_func=sample_inputs_window,
        reference_inputs_func=reference_inputs_window,
        error_inputs_func=error_inputs_window,
    ),
]
diff --git
a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/sparse.py b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/sparse.py
new file mode 100644
index 0000000000000000000000000000000000000000..3e1f816d9f73fec05fa59ac8b863236242b85f7d
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/sparse.py
@@ -0,0 +1,925 @@
# mypy: ignore-errors

import os

import torch
from torch.testing import make_tensor  # noqa: F401
from torch.testing._internal.opinfo.core import (  # noqa: F401
    BinaryUfuncInfo,
    ErrorInput,
    generate_elementwise_binary_tensors,
    ReductionOpInfo,
    sample_inputs_reduction,
    SampleInput,
)


def _check_validate(op_info, sample):
    # Cross-check a classified sample against actually calling the op:
    # an ErrorInput must raise its declared error_type; a SampleInput must succeed.
    def _check_fail(sample):
        try:
            op_info(
                sample.sample_input.input,
                *sample.sample_input.args,
                **sample.sample_input.kwargs,
            )
        except sample.error_type:
            pass
        except Exception as msg:
            raise AssertionError(  # noqa: B904
                f"{op_info.name} on {sample.sample_input=} expected exception "
                f"{sample.error_type}: {sample.error_regex}, got {type(msg).__name__}: {msg}"
            )
        else:
            raise AssertionError(
                f"{op_info.name} on {sample.sample_input=} expected exception "
                f"{sample.error_type}: {sample.error_regex}, got none."
            )

    def _check_success(sample):
        try:
            op_info(sample.input, *sample.args, **sample.kwargs)
        except Exception as msg:
            raise AssertionError(  # noqa: B904
                f"{op_info.name} on {sample=} expected to succeed "
                f", got {type(msg).__name__}: {msg}"
            )

    if isinstance(sample, ErrorInput):
        _check_fail(sample)
    else:
        _check_success(sample)


def _sample_inputs_sparse(
    sample_inputs,
    maybe_failing_sample_inputs,
    validate_sample_input,
    op_info,
    *args,
    **kwargs,
):
    # Yield only samples that validate as SampleInput; validation is cross-checked
    # against the real op only when PYTORCH_TEST_CHECK_VALIDATE_SPARSE_SAMPLES=1.
    check_validate = (
        os.environ.get("PYTORCH_TEST_CHECK_VALIDATE_SPARSE_SAMPLES", "0") == "1"
    )
    for sample in sample_inputs(op_info, *args, **kwargs):
        sample = validate_sample_input(op_info, sample, check_validate=check_validate)
        if isinstance(sample, SampleInput):
            yield sample
        # Error inputs are handled in error_inputs_sparse

    for sample in maybe_failing_sample_inputs(op_info, *args, **kwargs):
        sample = validate_sample_input(op_info, sample, check_validate=check_validate)
        if isinstance(sample, SampleInput):
            yield sample


def _error_inputs_sparse(
    maybe_failing_sample_inputs, validate_sample_input, op_info, *args, **kwargs
):
    # Mirror image of _sample_inputs_sparse: keep only the ErrorInputs.
    check_validate = (
        os.environ.get("PYTORCH_TEST_CHECK_VALIDATE_SPARSE_SAMPLES", "0") == "1"
    )
    for sample in maybe_failing_sample_inputs(op_info, *args, **kwargs):
        sample = validate_sample_input(op_info, sample, check_validate=check_validate)
        if isinstance(sample, ErrorInput):
            yield sample
    # Sample inputs are handled in sample_inputs_sparse


def _apply_requires_grad_to_samples(sample_inputs):
    """Decorator to _maybe_failing_sample_inputs_... generator functions
    that clones and sets requires_grad argument to tensors in sample
    input arguments. This is needed when the generated samples share
    tensor instances.
    """

    def wrapper(op_info, device, dtype, requires_grad, layout, **kwargs):
        def apply_requires_grad(x):
            # Only clone float/complex tensors that don't already require grad.
            if (
                not isinstance(x, torch.Tensor)
                or x.requires_grad
                or not requires_grad
                or not (x.is_floating_point() or x.is_complex())
            ):
                return x
            return x.detach().clone().requires_grad_(requires_grad)

        if requires_grad:
            for sample_input in sample_inputs(
                op_info, device, dtype, requires_grad, layout, **kwargs
            ):
                yield sample_input.transform(apply_requires_grad)
        else:
            yield from sample_inputs(
                op_info, device, dtype, requires_grad, layout, **kwargs
            )

    return wrapper


def sample_inputs_sparse_reduction(
    op_info, device, dtype, requires_grad, layout, blocksize=None, **kwargs
):
    """Sample inputs for reduction operations on sparse tensors."""
    # e.g. torch.sparse_csr -> "sparse_csr", torch.sparse_coo -> "sparse",
    # matching the OpInfo supports_* attribute names.
    layout_name = str(layout).split(".", 1)[-1].rsplit("_coo", 1)[0]
    op_supports_layout = getattr(op_info, "supports_" + layout_name)
    if not op_supports_layout:
        return

    for sample_input in sample_inputs_reduction(
        op_info, device, dtype, requires_grad, **kwargs
    ):
        if sample_input.input.ndim == 0:
            # scalar sparse tensors are not supported
            continue

        if layout in {
            torch.sparse_csr,
            torch.sparse_csc,
            torch.sparse_bsr,
            torch.sparse_bsc,
        }:
            if sample_input.input.ndim < 2:
                # conversion to sparse compressed tensors requires at
                # least 2 dimensional tensors
                continue
            if sample_input.input.ndim > 2 and (sample_input.input == 0).any():
                # Skip batched sparse compressed samples that contain
                # explicit zeros because to_sparse(layout=..) will
                # fail, see gh-98495.
                # TODO: remove this if-block after gh-98495 is fixed.
            continue

        if layout in {torch.sparse_bsr, torch.sparse_bsc} and blocksize is None:
            # block layouts need a blocksize; default to the smallest
            blocksize = (1, 1)

        yield SampleInput(
            sample_input.input.detach()
            .to_sparse(layout=layout, blocksize=blocksize)
            .requires_grad_(requires_grad),
            args=sample_input.args,
            kwargs=sample_input.kwargs,
        )

        if layout is torch.sparse_coo and (dtype.is_floating_point or dtype.is_complex):
            # uncoalesced samples: duplicate every index/value pair
            inp = sample_input.input.detach().to_sparse(layout=layout)
            inp = torch.sparse_coo_tensor(
                inp.indices().repeat(1, 2),
                inp.values().repeat(2),
                inp.shape,
                dtype=inp.dtype,
                device=inp.device,
            )
            assert not inp.is_coalesced()
            yield SampleInput(
                inp.requires_grad_(requires_grad),
                args=sample_input.args,
                kwargs=sample_input.kwargs,
            )

        if sample_input.input.ndim > 2:
            # hybrid samples (trailing dims stored as dense)
            yield SampleInput(
                sample_input.input.detach()
                .to_sparse(
                    layout=layout,
                    blocksize=blocksize,
                    dense_dim=sample_input.input.ndim - 2,
                )
                .requires_grad_(requires_grad),
                args=sample_input.args,
                kwargs=sample_input.kwargs,
            )


def _validate_sample_input_sparse_reduction(op_info, sample, check_validate=False):
    """Return the specified sample when it is valid and supported by the
    operation. Otherwise, return the sample as ErrorInput instance.

    When check_validate is True, the result is validated against
    calling the op on the sample.
    """
    UNSPECIFIED = object()
    if op_info.name == "sum":
        sample = _validate_sample_input_sparse_reduction_sum(sample)

    if op_info.name in {"masked.sum"}:
        # sentinel distinguishes "mask not given" from "mask=None"
        mask = sample.kwargs.get("mask", UNSPECIFIED)
        if (
            mask not in {None, UNSPECIFIED}
            and mask.ndim > 2
            and mask.layout is torch.strided
            and (mask == 0).any()
        ):
            # TODO: remove this if-block after gh-98495 is fixed.
            sample = ErrorInput(
                sample,
                error_regex="Expect the same number of specified elements per batch.",
            )
        elif not sample.kwargs.get("keepdim"):
            sample = ErrorInput(
                sample,
                error_type=(AssertionError, RuntimeError),
                error_regex="reduction operations on (CSR|CSC) tensors with keepdim=False is unsupported",
            )
        elif mask is UNSPECIFIED:
            sample = ErrorInput(
                sample,
                error_type=ValueError,
                error_regex="masked (.*) expects explicit mask for sparse_csr tensor input",
            )
        elif sample.input.ndim > 2:
            sample = ErrorInput(
                sample,
                error_regex="crow_indices is supposed to be a vector, but got 3 dimensional tensor.",
            )

    if op_info.name in {"masked.amax", "masked.amin", "masked.mean", "masked.prod"}:
        t_inp = sample.input
        # NOTE(review): batch_dim is computed but never used below.
        batch_dim = t_inp.dim() - t_inp.dense_dim() - t_inp.sparse_dim()
        mask = sample.kwargs.get("mask")
        if (
            mask is not None
            and mask.ndim > 2
            and mask.layout is torch.strided
            and (mask == 0).any()
        ):
            # TODO: remove this if-block after gh-98495 is fixed.
            sample = ErrorInput(
                sample,
                error_regex="Expect the same number of specified elements per batch.",
            )
        elif mask is None:
            sample = ErrorInput(
                sample,
                error_type=ValueError,
                error_regex="masked (.*) expects explicit mask for sparse_csr tensor input",
            )
        elif (
            mask.layout is sample.input.layout
            and mask.ndim > 2
            and op_info.name == "masked.mean"
        ):
            sample = ErrorInput(
                sample,
                error_type=TypeError,
                error_regex=(
                    "where[(][)] received an invalid combination of arguments"
                    " - got [(]Tensor, Tensor, NoneType[)]"
                ),
            )
        elif not sample.kwargs.get("keepdim"):
            sample = ErrorInput(
                sample,
                error_type=(AssertionError, RuntimeError),
                error_regex="reduction operations on (CSR|CSC) tensors with keepdim=False is unsupported",
            )
        elif (
            sample.input.ndim > 2
            and (sample.kwargs.get("dim") not in {0, 1})
            and mask.ndim > 2
            and mask.layout is not torch.strided
        ):
            if sample.kwargs.get("dim") == (0, -1):
                sample = ErrorInput(
                    sample,
                    error_regex="tensor dimensionality must be sum of batch, base, and dense dimensionalities",
                )
            elif op_info.name == "masked.prod":
                sample = ErrorInput(
                    sample,
                    error_regex="input_dim == 2 INTERNAL ASSERT FAILED at",
                )
            else:
                sample = ErrorInput(
                    sample,
                    error_type=AssertionError,
                    error_regex="Sparse CSR tensors are 2D and only support reduction along dim 0 or 1.",
                )
        elif sample.input.ndim > 2:
            sample = ErrorInput(
                sample,
                error_regex="crow_indices is supposed to be a vector, but got 3 dimensional tensor.",
            )
        elif (
            mask.layout is t_inp.layout
            and mask._nnz() != t_inp._nnz()
            and t_inp.dense_dim() > 0
        ):
            sample = ErrorInput(
                sample,
                error_regex="Index tensor must have the same number of dimensions as src tensor",
            )

    if check_validate:
        _check_validate(op_info, sample)

    return sample


def _validate_sample_input_sparse_reduction_sum(sample, check_validate=False):
    # NOTE: When fixing a failing sample case, remove the
    # corresponding if-block
    # NOTE(review): check_validate is accepted but unused here.
    t_inp, t_args, t_kwargs = sample.input, sample.args, sample.kwargs
    dim = t_kwargs.get("dim")
    keepdim = t_kwargs.get("keepdim")
    layout = t_inp.layout
    if isinstance(dim, (int, list, tuple)):
        if layout in {
            torch.sparse_csr,
            torch.sparse_csc,
            torch.sparse_bsr,
            torch.sparse_bsc,
        }:
            if layout in {torch.sparse_csc, torch.sparse_bsr, torch.sparse_bsc}:
                return ErrorInput(
                    sample,
                    error_regex=(
                        "Currently the only compressed sparse format supported for sum.dim_IntList is CSR, but got layout"
                    ),
                )
            if layout in {torch.sparse_csr, torch.sparse_csc} and not keepdim:
                return ErrorInput(
                    sample,
                    error_regex=(
                        "reduction operations on CSR tensors with keepdim=False is unsupported"
                    ),
                )
            if t_inp.dim() != 2:
                return ErrorInput(
                    sample,
                    error_regex=("input_dim == 2 INTERNAL ASSERT"),
                )
            if layout == torch.sparse_csr:
                if t_inp.dtype == torch.bool:
                    return ErrorInput(
                        sample,
                        error_regex=("_sparse_csr_sum_cpu not implemented for 'Bool'"),
                    )
                if t_inp.dtype == torch.complex32:
                    return ErrorInput(
                        sample,
                        error_regex=(
                            "_sparse_csr_sum_cuda not implemented for 'ComplexHalf'"
                        ),
                    )
    return sample


def _maybe_failing_sample_inputs_sparse_reduction_sum(
    op_info, device, dtype, requires_grad, layout, **kwargs
):
    """Generator of samples that are known to fail or that were failing in past."""
    # NOTE: When fixing a failing case, remove the Exception comment
    # but keep the `yield sample` statement.
    if layout in [
        torch.sparse_csr,
        torch.sparse_csc,
    ]:
        # NotImplementedError: Could not run 'aten::sum.IntList_out' with arguments from the 'SparseCsrCPU' backend.
+ yield SampleInput( + torch.tensor([[0, 1], [2, 3]], dtype=dtype) + .to_sparse(layout=layout) + .requires_grad_(requires_grad), + kwargs=dict(dim=0, keepdim=True), + ) + yield SampleInput( + torch.tensor([[[0, 1]], [[2, 3]]], dtype=dtype) + .to_sparse(layout=layout, dense_dim=1) + .requires_grad_(requires_grad), + kwargs=dict(dim=0), + ) + yield SampleInput( + torch.tensor([[0, 1], [2, 3]], dtype=dtype) + .to_sparse(layout=layout) + .requires_grad_(requires_grad), + kwargs=dict(dim=(0,)), + ) + yield SampleInput( + torch.tensor([[0, 1], [2, 3]], dtype=dtype) + .to_sparse(layout=layout) + .requires_grad_(requires_grad), + kwargs=dict(dim=(0,), keepdim=True), + ) + yield SampleInput( + torch.tensor([[[0, 1]], [[2, 3]]], dtype=dtype) + .to_sparse(layout=layout, dense_dim=1) + .requires_grad_(requires_grad), + kwargs=dict(dim=(0,)), + ) + + # RuntimeError: torch.empty: Only batched sparse compressed (non-block) tensors are supported, but got size [2] + yield SampleInput( + torch.tensor([[0, 1], [2, 3]], dtype=dtype) + .to_sparse(layout=layout) + .requires_grad_(requires_grad), + kwargs=dict(dim=0), + ) + + if layout in [ + torch.sparse_bsr, + torch.sparse_bsc, + ]: + # RuntimeError: empty_sparse_compressed expected sparse compressed (non-block) tensor layout but got SparseBsr + yield SampleInput( + torch.tensor([[0, 1], [2, 3]], dtype=dtype) + .to_sparse(layout=layout, blocksize=(2, 2)) + .requires_grad_(requires_grad), + kwargs=dict(dim=0, keepdim=True), + ) + yield SampleInput( + torch.tensor([[[0, 1]], [[2, 3]]], dtype=dtype) + .to_sparse(layout=layout, dense_dim=1, blocksize=(1, 1)) + .requires_grad_(requires_grad), + kwargs=dict(dim=0), + ) + yield SampleInput( + torch.tensor([[0, 1], [2, 3]], dtype=dtype) + .to_sparse(layout=layout, blocksize=(1, 1)) + .requires_grad_(requires_grad), + kwargs=dict(dim=(0,)), + ) + yield SampleInput( + torch.tensor([[0, 1], [2, 3]], dtype=dtype) + .to_sparse(layout=layout, blocksize=(1, 1)) + .requires_grad_(requires_grad), + 
kwargs=dict(dim=(0,), keepdim=True), + ) + yield SampleInput( + torch.tensor([[[0, 1]], [[2, 3]]], dtype=dtype) + .to_sparse(layout=layout, blocksize=(1, 1), dense_dim=1) + .requires_grad_(requires_grad), + kwargs=dict(dim=(0,)), + ) + + # RuntimeError: torch.empty: Only batched sparse compressed (non-block) tensors are supported, but got size [2] + yield SampleInput( + torch.tensor([[0, 1], [2, 3]], dtype=dtype) + .to_sparse(layout=layout, blocksize=(1, 1)) + .requires_grad_(requires_grad), + kwargs=dict(dim=0), + ) + + +def sample_inputs_sparse_reduction_sum( + op_info, device, dtype, requires_grad, layout, **kwargs +): + """Sample inputs for sum on sparse tensors.""" + yield from _sample_inputs_sparse( + sample_inputs_sparse_reduction, + _maybe_failing_sample_inputs_sparse_reduction_sum, + _validate_sample_input_sparse_reduction, + op_info, + device, + dtype, + requires_grad, + layout, + **kwargs, + ) + + +def error_inputs_sparse_reduction_sum(op_info, device, layout, **kwargs): + """Error inputs for sum on sparse tensors.""" + dtype = torch.float64 + requires_grad = False + yield from _error_inputs_sparse( + _maybe_failing_sample_inputs_sparse_reduction_sum, + _validate_sample_input_sparse_reduction, + op_info, + device, + dtype, + requires_grad, + layout, + **kwargs, + ) + + +def sample_inputs_sparse_elementwise_binary_operation( + op_info, device, dtype, requires_grad, layout, **kwargs +): + """Sample inputs for elementwise binary operations on sparse tensors. + + The samples include regular, zero-sized, batched, and hybrid + sparse tensors as well as rhs scalars. All tensors are full tensors. 
+ """ + + def _to_sparse(tensor, **kwargs): + return tensor.detach().to_sparse(**kwargs).requires_grad_(requires_grad) + + for sample_input in generate_elementwise_binary_tensors( + op_info, + device=device, + dtype=dtype, + requires_grad=requires_grad, + exclude_zero=True, + **kwargs, + ): + lhs, rhs = sample_input.input, sample_input.args[0] + min_dense_dim = 0 + max_dense_dim = lhs.ndim - 1 + if layout in { + torch.sparse_csr, + torch.sparse_csc, + torch.sparse_bsr, + torch.sparse_bsc, + }: + if lhs.ndim < 2: + # sparse compressed tensors sparse_dim must be 2 + continue + max_dense_dim = lhs.ndim - 2 + + for dense_dim in range(min_dense_dim, max_dense_dim + 1): + if layout in {torch.sparse_bsr, torch.sparse_bsc}: + blocksizes = [(1, 1)] + if lhs.numel() > 0: + blocksizes.append( + ( + lhs.shape[lhs.ndim - 2 - dense_dim], + lhs.shape[lhs.ndim - 1 - dense_dim], + ) + ) + else: + blocksizes = [None] + for blocksize in blocksizes: + to_sparse_kwargs = dict( + layout=layout, dense_dim=dense_dim, blocksize=blocksize + ) + lhs_sparse = _to_sparse(lhs, **to_sparse_kwargs) + rhs_sparse = _to_sparse(rhs, **to_sparse_kwargs) + # op(sparse, sparse) + yield SampleInput( + lhs_sparse, + args=(rhs_sparse, *sample_input.args[1:]), + kwargs=sample_input.kwargs, + ) + # op(sparse, scalar) + yield SampleInput( + lhs_sparse, + args=( + make_tensor( + (), dtype=dtype, device=device, requires_grad=requires_grad + ), + *sample_input.args[1:], + ), + kwargs=sample_input.kwargs, + ) + + +def _validate_sample_input_elementwise_binary_sparse_mul(sample): + # NOTE: When fixing a failing sample case, remove the + # corresponding if-block + t_inp, t_args, t_kwargs = sample.input, sample.args, sample.kwargs + batch_dim = t_inp.dim() - t_inp.dense_dim() - t_inp.sparse_dim() + layout = t_inp.layout + dtype = t_inp.dtype + if layout is torch.sparse_csr and batch_dim > 0 and t_args[0].ndim > 0: + return ErrorInput( + sample, + error_regex=( + "coo_to_sparse_csr: conversion from Sparse to 
SparseCsr for input" + " tensors with sparse_dim[(][)]!=2 is not supported" + ), + ) + elif layout is torch.sparse_csc and t_args[0].ndim > 0: + return ErrorInput( + sample, error_regex="Expected result Tensor to be of format CSR" + ) + elif layout is torch.sparse_bsr and t_args[0].ndim > 0: + return ErrorInput( + sample, + error_regex="empty_sparse_compressed expected sparse compressed [(]non-block[)] tensor layout but got SparseBsr", + ) + elif layout is torch.sparse_bsc and t_args[0].ndim > 0: + return ErrorInput( + sample, + error_regex="empty_sparse_compressed expected sparse compressed [(]non-block[)] tensor layout but got SparseBsc", + ) + elif ( + layout is torch.sparse_coo + and dtype is torch.bool + and t_args[0].ndim > 0 + and t_inp.is_cpu + and t_inp.numel() > 0 + and t_inp.dense_dim() > 0 + ): + return ErrorInput( + sample, error_regex="\"addcmul_cpu_out\" not implemented for 'Bool'" + ) + elif ( + layout in {torch.sparse_coo, torch.sparse_csr} + and dtype is torch.bool + and t_inp._nnz() > 0 + and t_args[0].ndim > 0 + and t_inp.is_cpu + and t_inp.numel() > 0 + ): + return ErrorInput( + sample, error_regex="\"mul_out_sparse\" not implemented for 'Bool'" + ) + elif ( + layout is torch.sparse_csr + and t_args[0].layout is torch.strided + and 0 < t_args[0].ndim + and t_args[0].ndim < t_inp.ndim + ): + return ErrorInput( + sample, error_regex="sparse_mask_sparse_csr expects self to be 2D" + ) + elif layout is torch.sparse_csr and ( + (t_args[0].layout is torch.strided and 0 < t_args[0].ndim) + or (t_args[0].layout is layout and t_inp.shape != t_args[0].shape) + ): + return ErrorInput( + sample, + error_regex=( + "expects sparse inputs with equal dimensionality, number of sparse dimensions," + " and shape of sparse dimensions" + ), + ) + elif ( + layout is torch.sparse_csr + and t_inp.dense_dim() > 0 + and t_inp._nnz() > 0 + and t_inp.is_cpu + and dtype is torch.float16 + and t_args[0].ndim > 0 + ): + return ErrorInput( + sample, 
error_regex="\"addcmul_cpu_out\" not implemented for 'Half'" + ) + return sample + + +@_apply_requires_grad_to_samples +def _maybe_failing_sample_inputs_sparse_elementwise_binary_mul( + op_info, device, dtype, requires_grad, layout, **kwargs +): + """Generator of samples that are known to fail or that were failing in past.""" + # NOTE: When fixing a failing case, remove the Exception comment + # but keep the `yield sample` statement. + + blocksize = (1, 1) if layout in {torch.sparse_bsr, torch.sparse_bsc} else None + regular = torch.tensor([[1, 2], [3, 4]], device=device, dtype=dtype).to_sparse( + layout=layout, dense_dim=0, blocksize=blocksize + ) + batch = torch.tensor( + [[[1, 2], [3, 4]], [[4, 5], [6, 7]]], device=device, dtype=dtype + ).to_sparse(layout=layout, dense_dim=0, blocksize=blocksize) + hybrid = torch.tensor( + [[[1], [2]], [[3], [4]]], device=device, dtype=dtype + ).to_sparse(layout=layout, dense_dim=1, blocksize=blocksize) + + if layout is torch.sparse_csr: + # RuntimeError: crow_indices is supposed to be a vector, but got 2 dimensional tensor + yield SampleInput(batch, args=(batch,)) + # RuntimeError: Only tensors with two sparse dimensions can be + # converted to the SparseCsr layout, got self with 3 sparse + # dimensions. 
+ yield SampleInput( + torch.zeros_like(hybrid).requires_grad_(requires_grad), + args=(torch.zeros_like(hybrid).requires_grad_(requires_grad),), + ) + if dtype is torch.complex32: + # RuntimeError: "mul_out_sparse" not implemented for 'ComplexHalf' + yield SampleInput(regular, args=(regular,)) + if dtype is torch.bool and regular.is_cpu: + # RuntimeError: "mul_out_sparse" not implemented for 'Bool' + yield SampleInput(regular, args=(regular,)) + if layout is torch.sparse_csc: + # RuntimeError: Expected result Tensor to be of format CSR + yield SampleInput(regular, args=(regular,)) + if layout is torch.sparse_bsr: + # RuntimeError: empty_sparse_compressed expected sparse compressed (non-block) tensor layout but got SparseBsr + yield SampleInput(regular, args=(regular,)) + if layout is torch.sparse_bsc: + # RuntimeError: empty_sparse_compressed expected sparse compressed (non-block) tensor layout but got SparseBsc + yield SampleInput(regular, args=(regular,)) + if layout is torch.sparse_coo: + if dtype is torch.complex32: + # RuntimeError: "mul_out_sparse" not implemented for 'ComplexHalf' + yield SampleInput(regular, args=(regular,)) + if dtype is torch.bool and regular.is_cpu: + # RuntimeError: "mul_out_sparse" not implemented for 'Bool' + yield SampleInput(regular, args=(regular,)) + if dtype in {torch.bool, torch.float16} and regular.is_cpu: + # RuntimeError: "addcmul_cpu_out" not implemented for '(Bool|Half)' + yield SampleInput(hybrid, args=(hybrid,)) + + +def _validate_sample_input_sparse_elementwise_binary_operation( + op_info, sample, check_validate=False +): + if op_info.name == "mul": + sample = _validate_sample_input_elementwise_binary_sparse_mul(sample) + + if check_validate: + _check_validate(op_info, sample) + return sample + + +def sample_inputs_sparse_mul(op_info, device, dtype, requires_grad, layout, **kwargs): + """Sample inputs for mul operation on sparse tensors.""" + yield from _sample_inputs_sparse( + 
sample_inputs_sparse_elementwise_binary_operation, + _maybe_failing_sample_inputs_sparse_elementwise_binary_mul, + _validate_sample_input_sparse_elementwise_binary_operation, + op_info, + device, + dtype, + requires_grad, + layout, + **kwargs, + ) + + +def error_inputs_sparse_mul(op_info, device, layout, **kwargs): + """Error inputs for mul operation on sparse tensors.""" + dtype = torch.float64 + requires_grad = False + yield from _error_inputs_sparse( + _maybe_failing_sample_inputs_sparse_elementwise_binary_mul, + _validate_sample_input_sparse_elementwise_binary_operation, + op_info, + device, + dtype, + requires_grad, + layout, + **kwargs, + ) + + +def _sample_inputs_sparse_like_fns( + op_info, device, dtype, requires_grad, layout, **kwargs +): + from torch.testing._internal.common_utils import TestCase + + for tensor in TestCase().generate_simple_inputs( + layout, + device=device, + dtype=dtype, + enable_batch=True, + enable_hybrid=True, + enable_zero_sized=True, + enable_non_contiguous_indices=False, + enable_non_contiguous_values=False, + ): + yield SampleInput(tensor, args=(), kwargs={}) + yield SampleInput( + tensor, args=(), kwargs=dict(device=device, dtype=dtype, layout=layout) + ) + + if dtype is not torch.float64: + yield SampleInput(tensor, args=(), kwargs=dict(dtype=torch.float64)) + + if torch.cuda.is_available(): + other_device = "cuda" if tensor.device.type == "cpu" else "cpu" + yield SampleInput(tensor, args=(), kwargs=dict(device=other_device)) + + if layout is torch.sparse_csr: + other_layout = torch.sparse_csc + elif layout is torch.sparse_csc: + other_layout = torch.sparse_csr + elif layout is torch.sparse_bsr: + other_layout = torch.sparse_bsc + elif layout is torch.sparse_bsc: + other_layout = torch.sparse_bsr + else: + other_layout = torch.strided + yield SampleInput(tensor, args=(), kwargs=dict(layout=other_layout)) + + if layout is not torch.sparse_coo: + yield SampleInput(tensor, args=(), kwargs=dict(layout=torch.sparse_coo)) + + +def 
_validate_sample_input_sparse_like_fns(op_info, sample, check_validate=False): + if sample.input.layout in { + torch.sparse_csr, + torch.sparse_csc, + torch.sparse_bsr, + torch.sparse_bsc, + } and op_info.name not in {"zeros_like"}: + if sample.kwargs.get("layout", sample.input.layout) != sample.input.layout: + return ErrorInput( + sample, + error_regex=( + "empty_like with different sparse layout is not supported" + " \\(self is Sparse(Csc|Csr|Bsc|Bsr) but you requested Sparse(Csr|Csc|Bsr|Bsc)\\)" + ), + ) + if sample.input.layout is torch.sparse_coo: + return ErrorInput( + sample, + error_regex=( + "Could not run 'aten::normal_' with arguments from the 'Sparse(CPU|CUDA)' backend." + ), + ) + if check_validate: + _check_validate(op_info, sample) + return sample + + +def _maybe_failing_sample_inputs_sparse_like_fns( + op_info, device, dtype, requires_grad, layout, **kwargs +): + if torch.cuda.is_available() and layout is not torch.sparse_coo: + other_device = "cuda" if torch.device(device).type == "cpu" else "cpu" + if layout is torch.sparse_csr: + other_layout = torch.sparse_csc + elif layout is torch.sparse_csc: + other_layout = torch.sparse_csr + elif layout is torch.sparse_bsr: + other_layout = torch.sparse_bsc + elif layout is torch.sparse_bsc: + other_layout = torch.sparse_bsr + else: + other_layout = torch.strided + + blocksize = (1, 1) if layout in {torch.sparse_bsr, torch.sparse_bsc} else None + + yield SampleInput( + torch.tensor([[0, 1], [2, 3]], dtype=dtype, device=device).to_sparse( + layout=layout, blocksize=blocksize + ), + kwargs=dict(device=other_device), + ) + + yield SampleInput( + torch.tensor([[0, 1], [2, 3]], dtype=dtype, device=device).to_sparse( + layout=layout, blocksize=blocksize + ), + kwargs=dict(layout=other_layout), + ) + + +def sample_inputs_sparse_like_fns( + op_info, device, dtype, requires_grad, layout, **kwargs +): + """Sample inputs for like-functions on sparse tensors.""" + yield from _sample_inputs_sparse( + 
_sample_inputs_sparse_like_fns, + _maybe_failing_sample_inputs_sparse_like_fns, + _validate_sample_input_sparse_like_fns, + op_info, + device, + dtype, + requires_grad, + layout, + **kwargs, + ) + + +def error_inputs_sparse_like_fns(op_info, device, layout, **kwargs): + """Error inputs for like-functions on sparse tensors.""" + dtype = torch.float64 + requires_grad = False + yield from _error_inputs_sparse( + _maybe_failing_sample_inputs_sparse_like_fns, + _validate_sample_input_sparse_like_fns, + op_info, + device, + dtype, + requires_grad, + layout, + **kwargs, + ) + + +def _validate_sample_input_sparse_default(op_info, sample, check_validate=False): + if op_info.name == "to_sparse": + if ( + sample.input.layout + in {torch.sparse_csr, torch.sparse_csc, torch.sparse_bsr, torch.sparse_bsc} + and len(sample.args) == 1 + and isinstance(sample.args[0], int) + and sample.args[0] != 2 + ): + sample = ErrorInput( + sample, + error_regex="sparse dim argument must be 2 for sparse_compressed_to_sparse", + ) + + if check_validate: + _check_validate(op_info, sample) + return sample + + +def validate_sample_input_sparse(op_info, sample, check_validate=False): + """Return the specified sample when it is valid and supported by the + operation. Otherwise, return the sample as ErrorInput instance. + + When check_validate is True, the result is validated against + calling the op on the sample. 
+ """ + if isinstance(op_info, ReductionOpInfo): + return _validate_sample_input_sparse_reduction( + op_info, sample, check_validate=check_validate + ) + elif isinstance(op_info, BinaryUfuncInfo): + return _validate_sample_input_sparse_elementwise_binary_operation( + op_info, sample, check_validate=check_validate + ) + else: + return _validate_sample_input_sparse_default( + op_info, sample, check_validate=check_validate + ) diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/special.py b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/special.py new file mode 100644 index 0000000000000000000000000000000000000000..5b137799db8e57b211fe6446bee58dcba24dfd07 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/opinfo/definitions/special.py @@ -0,0 +1,843 @@ +# mypy: ignore-errors + +import unittest +from functools import partial +from itertools import product +from typing import List + +import numpy as np + +import torch +from torch.testing import make_tensor +from torch.testing._internal.common_device_type import ( + precisionOverride, + tol, + toleranceOverride, +) +from torch.testing._internal.common_dtype import all_types_and, floating_types +from torch.testing._internal.common_utils import ( + TEST_SCIPY, + TEST_WITH_ROCM, + torch_to_numpy_dtype_dict, +) +from torch.testing._internal.opinfo.core import ( + BinaryUfuncInfo, + DecorateInfo, + L, + NumericsFilter, + OpInfo, + S, + SampleInput, + UnaryUfuncInfo, +) +from torch.testing._internal.opinfo.refs import ( + ElementwiseBinaryPythonRefInfo, + ElementwiseUnaryPythonRefInfo, +) +from torch.testing._internal.opinfo.utils import ( + np_unary_ufunc_integer_promotion_wrapper, +) + + +if TEST_SCIPY: + import scipy.special + + +# TODO: Consolidate `i0e` with sample_inputs_unary when `make_tensor`, +# supports `exclude` argument. 
+# For more context: https://github.com/pytorch/pytorch/pull/56352#discussion_r633277617 +def sample_inputs_i0_i1(op_info, device, dtype, requires_grad, **kwargs): + exclude_zero = requires_grad and op_info.op == torch.special.i0e + make_arg = partial( + make_tensor, + dtype=dtype, + device=device, + requires_grad=requires_grad, + exclude_zero=exclude_zero, + ) + yield SampleInput(make_arg((S,))) + yield SampleInput(make_arg(())) + + if requires_grad and not exclude_zero: + # Special Case for gradient + # Sample with `0` in the input + t = make_arg((S,)) + t[0] = 0 + + yield SampleInput(t) + + +def sample_inputs_polygamma(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial( + make_tensor, + device=device, + # TODO: eliminate low after gh-106692 is fixed: + low=(1 if dtype in {torch.int32, torch.int64} else None), + dtype=dtype, + requires_grad=requires_grad, + ) + tensor_shapes = ((S, S), ()) + ns = (1, 2, 3, 4, 5) + + for shape, n in product(tensor_shapes, ns): + yield SampleInput(make_arg(shape), args=(n,)) + + +def reference_polygamma(x, n): + # WEIRD `scipy.special.polygamma` behavior + # >>> scipy.special.polygamma(0, np.array(501, dtype=np.float32)).dtype + # dtype('float64') + # >>> scipy.special.polygamma(0, np.array([501], dtype=np.float32)).dtype + # dtype('float32') + # + # Thus we cast output to the default torch dtype or preserve double + result_dtype = torch_to_numpy_dtype_dict[torch.get_default_dtype()] + if x.dtype == np.double: + result_dtype = np.double + return scipy.special.polygamma(n, x).astype(result_dtype) + + +def sample_inputs_entr(op_info, device, dtype, requires_grad, **kwargs): + low, _ = op_info.domain + + if requires_grad: + low = 0 + op_info._domain_eps + + make_arg = partial( + make_tensor, dtype=dtype, device=device, low=low, requires_grad=requires_grad + ) + yield SampleInput(make_arg((L,))) + yield SampleInput(make_arg(())) + + +def sample_inputs_erfcx(op_info, device, dtype, requires_grad, **kwargs): + for 
shape in ((L,), (1, 0, 3), ()): + yield SampleInput( + make_tensor( + shape, + device=device, + dtype=dtype, + low=-5, + requires_grad=requires_grad, + ), + ) + + +op_db: List[OpInfo] = [ + UnaryUfuncInfo( + "special.i0e", + aten_name="special_i0e", + ref=scipy.special.i0e if TEST_SCIPY else None, + decorators=(precisionOverride({torch.bfloat16: 3e-1, torch.float16: 3e-1}),), + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + backward_dtypes=floating_types(), + sample_inputs_func=sample_inputs_i0_i1, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + ), + UnaryUfuncInfo( + "special.i1", + aten_name="special_i1", + ref=np_unary_ufunc_integer_promotion_wrapper(scipy.special.i1) + if TEST_SCIPY + else None, + dtypes=all_types_and(torch.bool), + dtypesIfCUDA=all_types_and(torch.bool), + sample_inputs_func=sample_inputs_i0_i1, + decorators=( + DecorateInfo( + toleranceOverride( + { + torch.float32: tol(atol=1e-4, rtol=0), + torch.bool: tol(atol=1e-4, rtol=0), + } + ) + ), + ), + skips=( + DecorateInfo( + unittest.skip("Incorrect result!"), + "TestUnaryUfuncs", + "test_reference_numerics_large", + dtypes=(torch.int8,), + ), + ), + supports_fwgrad_bwgrad=True, + supports_forward_ad=True, + ), + UnaryUfuncInfo( + "special.i1e", + aten_name="special_i1e", + ref=scipy.special.i1e if TEST_SCIPY else None, + dtypes=all_types_and(torch.bool), + dtypesIfCUDA=all_types_and(torch.bool), + sample_inputs_func=sample_inputs_i0_i1, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + ), + UnaryUfuncInfo( + "special.ndtr", + aten_name="special_ndtr", + decorators=(precisionOverride({torch.bfloat16: 5e-3, torch.float16: 5e-4}),), + ref=scipy.special.ndtr if TEST_SCIPY else None, + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # Dispatch stub: unsupported device typemeta + DecorateInfo( + unittest.expectedFailure, + "TestFwdGradients", + "test_fn_fwgrad_bwgrad", + 
device_type="meta", + ), + ), + ), + # A separate OpInfo entry for special.polygamma is needed to reorder the arguments + # for the alias. See the discussion here: https://github.com/pytorch/pytorch/pull/59691#discussion_r650261939 + UnaryUfuncInfo( + "special.polygamma", + op=lambda x, n, **kwargs: torch.special.polygamma(n, x, **kwargs), + variant_test_name="special_polygamma_n_0", + ref=reference_polygamma if TEST_SCIPY else None, + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_polygamma, + skips=( + # lambda impl + DecorateInfo( + unittest.expectedFailure, "TestJit", "test_variant_consistency_jit" + ), + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + ), + sample_kwargs=lambda device, dtype, input: ({"n": 0}, {"n": 0}), + # polygamma functions have multiple singularities at x having non-positive integer value + reference_numerics_filter=NumericsFilter( + condition=lambda x: (x < 0.1) & ((x - x.round()).abs() < 1e-4), safe_val=1 + ), + ), + BinaryUfuncInfo( + "special.xlog1py", + aten_name="special_xlog1py", + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + promotes_int_to_float=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_one_python_scalar=True, + # We don't test -1 as the gradient will be NaN and it'll break + rhs_make_tensor_kwargs=dict(low=-0.99), + ), + BinaryUfuncInfo( + "special.zeta", + aten_name="special_zeta", + dtypes=all_types_and(torch.bool), + promotes_int_to_float=True, + supports_autograd=False, + supports_one_python_scalar=True, + skips=( + # Reference reference_inputs nans and infs on cuda and nan, inf, 0., -inf for cpu + DecorateInfo(unittest.expectedFailure, "TestCommon", "test_compare_cpu"), + ), + ), + # TODO: FIXME + # OpInfo entry to verify the 
gradient formula of `other`/`q` + # BinaryUfuncInfo('special.zeta', + # op=lambda q, x, **kwargs: torch.special.zeta(x, q, **kwargs), + # aten_name='special_zeta', + # variant_test_name='grad', + # dtypes=all_types_and(torch.bool), + # promotes_int_to_float=True, + # supports_autograd=True, + # supports_rhs_python_scalar=False, + # decorators=[ + # # Derivative wrt first tensor not implemented + # DecorateInfo(unittest.expectedFailure, "TestCommon", + # "test_floating_inputs_are_differentiable") + # ], + # skips=( + # # Lambda doesn't work in JIT test + # # AssertionError: JIT Test does not execute any logic + # DecorateInfo(unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit"), + # )), + UnaryUfuncInfo( + "special.entr", + ref=scipy.special.entr if TEST_SCIPY else None, + aten_name="special_entr", + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=(precisionOverride({torch.float16: 1e-1, torch.bfloat16: 1e-1}),), + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestUnaryUfuncs", + "test_reference_numerics_large", + dtypes=[torch.bfloat16, torch.float16], + ), + ), + supports_inplace_autograd=False, + sample_inputs_func=sample_inputs_entr, + ), + UnaryUfuncInfo( + "special.ndtri", + ref=scipy.special.ndtri if TEST_SCIPY else None, + domain=(0, 1), + aten_name="special_ndtri", + dtypes=all_types_and(torch.bool), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + ), + UnaryUfuncInfo( + "special.log_ndtr", + aten_name="special_log_ndtr", + ref=scipy.special.log_ndtr if TEST_SCIPY else None, + dtypes=all_types_and(torch.bool), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + ), + UnaryUfuncInfo( + "special.erfcx", + ref=scipy.special.erfcx if TEST_SCIPY else None, + aten_name="special_erfcx", + decorators=( + toleranceOverride( + { + torch.float32: tol(atol=0, rtol=4e-6), + } + ), + ), + dtypes=all_types_and(torch.bool), + 
supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_erfcx, + ), + UnaryUfuncInfo( + "special.airy_ai", + decorators=( + precisionOverride( + { + torch.float32: 1e-03, + torch.float64: 1e-05, + }, + ), + ), + dtypes=all_types_and(torch.bool), + ref=lambda x: scipy.special.airy(x)[0] if TEST_SCIPY else None, + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestUnaryUfuncs", + "test_reference_numerics_large", + ), + ), + supports_autograd=False, + ), + UnaryUfuncInfo( + "special.bessel_j0", + decorators=( + precisionOverride( + { + torch.float32: 1e-04, + torch.float64: 1e-05, + }, + ), + ), + dtypes=all_types_and(torch.bool), + ref=scipy.special.j0 if TEST_SCIPY else None, + supports_autograd=False, + ), + UnaryUfuncInfo( + "special.bessel_j1", + decorators=( + precisionOverride( + { + torch.float32: 1e-04, + torch.float64: 1e-05, + }, + ), + ), + dtypes=all_types_and(torch.bool), + ref=scipy.special.j1 if TEST_SCIPY else None, + supports_autograd=False, + ), + UnaryUfuncInfo( + "special.bessel_y0", + decorators=( + precisionOverride( + { + torch.float32: 1e-04, + torch.float64: 1e-05, + }, + ), + ), + dtypes=all_types_and(torch.bool), + ref=scipy.special.y0 if TEST_SCIPY else None, + supports_autograd=False, + ), + UnaryUfuncInfo( + "special.bessel_y1", + decorators=( + precisionOverride( + { + torch.float32: 1e-04, + torch.float64: 1e-05, + }, + ), + ), + dtypes=all_types_and(torch.bool), + ref=scipy.special.y1 if TEST_SCIPY else None, + supports_autograd=False, + ), + BinaryUfuncInfo( + "special.chebyshev_polynomial_t", + dtypes=all_types_and(torch.bool), + promotes_int_to_float=True, + skips=( + DecorateInfo(unittest.skip("Skipped!"), "TestCudaFuserOpInfo"), + DecorateInfo(unittest.skip("Skipped!"), "TestNNCOpInfo"), + DecorateInfo( + unittest.skip("testing takes an unreasonably long time, #79528"), + "TestCommon", + "test_compare_cpu", + ), + ), + supports_one_python_scalar=True, + supports_autograd=False, + ), + 
BinaryUfuncInfo( + "special.chebyshev_polynomial_u", + dtypes=all_types_and(torch.bool), + promotes_int_to_float=True, + skips=( + DecorateInfo(unittest.skip("Skipped!"), "TestCudaFuserOpInfo"), + DecorateInfo(unittest.skip("Skipped!"), "TestNNCOpInfo"), + DecorateInfo( + unittest.skip("testing takes an unreasonably long time, #79528"), + "TestCommon", + "test_compare_cpu", + ), + ), + supports_one_python_scalar=True, + supports_autograd=False, + ), + BinaryUfuncInfo( + "special.chebyshev_polynomial_v", + dtypes=all_types_and(torch.bool), + promotes_int_to_float=True, + skips=( + DecorateInfo( + unittest.skip( + "Skipping - testing takes an unreasonably long time, #79528" + ) + ), + DecorateInfo(unittest.skip("Skipped!"), "TestCudaFuserOpInfo"), + DecorateInfo(unittest.skip("Skipped!"), "TestNNCOpInfo"), + ), + supports_one_python_scalar=True, + supports_autograd=False, + ), + BinaryUfuncInfo( + "special.chebyshev_polynomial_w", + dtypes=all_types_and(torch.bool), + promotes_int_to_float=True, + skips=( + DecorateInfo( + unittest.skip( + "Skipping - testing takes an unreasonably long time, #79528" + ) + ), + DecorateInfo(unittest.skip("Skipped!"), "TestCudaFuserOpInfo"), + DecorateInfo(unittest.skip("Skipped!"), "TestNNCOpInfo"), + ), + supports_one_python_scalar=True, + supports_autograd=False, + ), + BinaryUfuncInfo( + "special.hermite_polynomial_h", + dtypes=all_types_and(torch.bool), + promotes_int_to_float=True, + skips=( + DecorateInfo(unittest.skip("Skipped!"), "TestCudaFuserOpInfo"), + DecorateInfo(unittest.skip("Skipped!"), "TestNNCOpInfo"), + # Greatest absolute difference: inf + DecorateInfo(unittest.expectedFailure, "TestCommon", "test_compare_cpu"), + DecorateInfo(unittest.skip("Hangs on ROCm 6.1"), active_if=TEST_WITH_ROCM), + ), + supports_one_python_scalar=True, + supports_autograd=False, + ), + BinaryUfuncInfo( + "special.hermite_polynomial_he", + dtypes=all_types_and(torch.bool), + promotes_int_to_float=True, + skips=( + 
DecorateInfo(unittest.skip("Skipped!"), "TestCudaFuserOpInfo"), + DecorateInfo(unittest.skip("Skipped!"), "TestNNCOpInfo"), + DecorateInfo( + unittest.skip("testing takes an unreasonably long time, #79528"), + "TestCommon", + "test_compare_cpu", + ), + ), + supports_one_python_scalar=True, + supports_autograd=False, + ), + BinaryUfuncInfo( + "special.laguerre_polynomial_l", + dtypes=all_types_and(torch.bool), + promotes_int_to_float=True, + skips=( + DecorateInfo(unittest.skip("Skipped!"), "TestCudaFuserOpInfo"), + DecorateInfo(unittest.skip("Skipped!"), "TestNNCOpInfo"), + DecorateInfo( + unittest.skip("testing takes an unreasonably long time, #79528"), + "TestCommon", + "test_compare_cpu", + ), + ), + supports_one_python_scalar=True, + supports_autograd=False, + ), + BinaryUfuncInfo( + "special.legendre_polynomial_p", + dtypes=all_types_and(torch.bool), + promotes_int_to_float=True, + skips=( + DecorateInfo( + unittest.skip( + "Skipping - testing takes an unreasonably long time, #79528" + ) + ), + DecorateInfo(unittest.skip("Skipped!"), "TestCudaFuserOpInfo"), + DecorateInfo(unittest.skip("Skipped!"), "TestNNCOpInfo"), + DecorateInfo( + unittest.skip("testing takes an unreasonably long time, #79528"), + "TestCommon", + "test_compare_cpu", + ), + ), + supports_one_python_scalar=True, + supports_autograd=False, + ), + UnaryUfuncInfo( + "special.modified_bessel_i0", + decorators=( + precisionOverride( + { + torch.float32: 1e-03, + torch.float64: 1e-05, + }, + ), + ), + dtypes=all_types_and(torch.bool), + ref=scipy.special.i0 if TEST_SCIPY else None, + supports_autograd=False, + ), + UnaryUfuncInfo( + "special.modified_bessel_i1", + decorators=( + precisionOverride( + { + torch.float32: 1e-03, + torch.float64: 1e-05, + }, + ), + ), + dtypes=all_types_and(torch.bool), + ref=scipy.special.i1 if TEST_SCIPY else None, + supports_autograd=False, + ), + UnaryUfuncInfo( + "special.modified_bessel_k0", + decorators=( + precisionOverride( + { + torch.float32: 1e-03, + 
torch.float64: 1e-05, + }, + ), + ), + dtypes=all_types_and(torch.bool), + ref=scipy.special.k0 if TEST_SCIPY else None, + supports_autograd=False, + ), + UnaryUfuncInfo( + "special.modified_bessel_k1", + decorators=( + precisionOverride( + { + torch.float32: 1e-03, + torch.float64: 1e-05, + }, + ), + ), + dtypes=all_types_and(torch.bool), + ref=scipy.special.k1 if TEST_SCIPY else None, + supports_autograd=False, + ), + UnaryUfuncInfo( + "special.scaled_modified_bessel_k0", + decorators=( + toleranceOverride( + { + torch.float32: tol(atol=1e-03, rtol=1e-03), + torch.float64: tol(atol=1e-05, rtol=1e-03), + } + ), + ), + dtypes=all_types_and(torch.bool), + ref=scipy.special.k0e if TEST_SCIPY else None, + supports_autograd=False, + ), + UnaryUfuncInfo( + "special.scaled_modified_bessel_k1", + decorators=( + toleranceOverride( + { + torch.float32: tol(atol=1e-03, rtol=1e-03), + torch.float64: tol(atol=1e-05, rtol=1e-03), + } + ), + ), + dtypes=all_types_and(torch.bool), + ref=scipy.special.k1e if TEST_SCIPY else None, + supports_autograd=False, + ), + BinaryUfuncInfo( + "special.shifted_chebyshev_polynomial_t", + dtypes=all_types_and(torch.bool), + promotes_int_to_float=True, + skips=( + DecorateInfo( + unittest.skip( + "Skipping - testing takes an unreasonably long time, #79528" + ) + ), + DecorateInfo(unittest.skip("Skipped!"), "TestCudaFuserOpInfo"), + DecorateInfo(unittest.skip("Skipped!"), "TestNNCOpInfo"), + DecorateInfo( + unittest.skip("testing takes an unreasonably long time, #79528"), + "TestCommon", + "test_compare_cpu", + ), + ), + supports_one_python_scalar=True, + supports_autograd=False, + ), + BinaryUfuncInfo( + "special.shifted_chebyshev_polynomial_u", + dtypes=all_types_and(torch.bool), + promotes_int_to_float=True, + skips=( + DecorateInfo( + unittest.skip( + "Skipping - testing takes an unreasonably long time, #79528" + ) + ), + DecorateInfo(unittest.skip("Skipped!"), "TestCudaFuserOpInfo"), + DecorateInfo(unittest.skip("Skipped!"), 
"TestNNCOpInfo"), + DecorateInfo( + unittest.skip("testing takes an unreasonably long time, #79528"), + "TestCommon", + "test_compare_cpu", + ), + ), + supports_one_python_scalar=True, + supports_autograd=False, + ), + BinaryUfuncInfo( + "special.shifted_chebyshev_polynomial_v", + dtypes=all_types_and(torch.bool), + promotes_int_to_float=True, + skips=( + DecorateInfo( + unittest.skip( + "Skipping - testing takes an unreasonably long time, #79528" + ) + ), + DecorateInfo(unittest.skip("Skipped!"), "TestCudaFuserOpInfo"), + DecorateInfo(unittest.skip("Skipped!"), "TestNNCOpInfo"), + DecorateInfo( + unittest.skip("testing takes an unreasonably long time, #79528"), + "TestCommon", + "test_compare_cpu", + ), + ), + supports_one_python_scalar=True, + supports_autograd=False, + ), + BinaryUfuncInfo( + "special.shifted_chebyshev_polynomial_w", + dtypes=all_types_and(torch.bool), + promotes_int_to_float=True, + skips=( + DecorateInfo( + unittest.skip( + "Skipping - testing takes an unreasonably long time, #79528" + ) + ), + DecorateInfo(unittest.skip("Skipped!"), "TestCudaFuserOpInfo"), + DecorateInfo(unittest.skip("Skipped!"), "TestNNCOpInfo"), + DecorateInfo( + unittest.skip("testing takes an unreasonably long time, #79528"), + "TestCommon", + "test_compare_cpu", + ), + ), + supports_one_python_scalar=True, + supports_autograd=False, + ), + UnaryUfuncInfo( + "special.spherical_bessel_j0", + decorators=( + toleranceOverride( + { + torch.float32: tol(atol=1e-03, rtol=1e-03), + torch.float64: tol(atol=1e-05, rtol=1e-03), + } + ), + ), + dtypes=all_types_and(torch.bool), + ref=lambda x: scipy.special.spherical_jn(0, x) if TEST_SCIPY else None, + supports_autograd=False, + ), +] + +python_ref_db: List[OpInfo] = [ + # + # Elementwise Unary Special OpInfos + # + ElementwiseUnaryPythonRefInfo( + "_refs.special.bessel_j0", + torch_opinfo_name="special.bessel_j0", + op_db=op_db, + decorators=( + precisionOverride( + { + torch.float32: 1e-04, + torch.float64: 1e-05, + }, + ), + ), 
+ ), + ElementwiseUnaryPythonRefInfo( + "_refs.special.bessel_j1", + torch_opinfo_name="special.bessel_j1", + op_db=op_db, + decorators=( + precisionOverride( + { + torch.float32: 1e-04, + torch.float64: 1e-05, + }, + ), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.special.entr", + torch_opinfo_name="special.entr", + op_db=op_db, + decorators=(precisionOverride({torch.float16: 1e-1, torch.bfloat16: 1e-1}),), + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestUnaryUfuncs", + "test_reference_numerics_large", + dtypes=[torch.bfloat16, torch.float16], + ), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.special.erfcx", + torch_opinfo_name="special.erfcx", + op_db=op_db, + decorators=( + toleranceOverride( + { + torch.float32: tol(atol=0, rtol=4e-6), + } + ), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.special.i0e", + torch_opinfo_name="special.i0e", + op_db=op_db, + decorators=(precisionOverride({torch.bfloat16: 3e-1, torch.float16: 3e-1}),), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.special.i1", + torch_opinfo_name="special.i1", + op_db=op_db, + decorators=( + DecorateInfo( + toleranceOverride( + { + torch.float32: tol(atol=1e-4, rtol=0), + torch.bool: tol(atol=1e-4, rtol=0), + } + ) + ), + ), + skips=( + DecorateInfo( + unittest.skip("Incorrect result!"), + "TestUnaryUfuncs", + "test_reference_numerics_large", + dtypes=(torch.int8,), + ), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.special.i1e", + torch_opinfo_name="special.i1e", + op_db=op_db, + ), + ElementwiseUnaryPythonRefInfo( + "_refs.special.log_ndtr", + torch_opinfo_name="special.log_ndtr", + op_db=op_db, + ), + ElementwiseUnaryPythonRefInfo( + "_refs.special.ndtr", + torch_opinfo_name="special.ndtr", + op_db=op_db, + ), + ElementwiseUnaryPythonRefInfo( + "_refs.special.ndtri", + torch_opinfo_name="special.ndtri", + op_db=op_db, + ), + ElementwiseUnaryPythonRefInfo( + "_refs.special.spherical_bessel_j0", + torch_opinfo_name="special.spherical_bessel_j0", + 
op_db=op_db, + decorators=( + toleranceOverride( + { + torch.float32: tol(atol=1e-03, rtol=1e-03), + torch.float64: tol(atol=1e-05, rtol=1e-03), + } + ), + ), + ), + # + # Elementwise Binary Special OpInfos + # + ElementwiseBinaryPythonRefInfo( + "_refs.special.zeta", + torch_opinfo_name="special.zeta", + supports_one_python_scalar=True, + op_db=op_db, + skips=( + # Reference reference_inputs nans and infs on cuda and nan, inf, 0., -inf for cpu + DecorateInfo(unittest.expectedFailure, "TestCommon", "test_compare_cpu"), + ), + ), +] diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/opinfo/refs.py b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/opinfo/refs.py new file mode 100644 index 0000000000000000000000000000000000000000..435a9d113164b3652af4d246655f579d1b72d4dc --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/opinfo/refs.py @@ -0,0 +1,207 @@ +# mypy: ignore-errors + +from torch.testing._internal.opinfo.core import ( + BinaryUfuncInfo, + OpInfo, + ReductionOpInfo, + UnaryUfuncInfo, +) + + +# NOTE [Python References] +# Python References emulate existing PyTorch operations, but can ultimately +# be expressed in terms of "primitive" operations from torch._prims. +# +# These references are experimental. +# See https://dev-discuss.pytorch.org/t/tracing-with-primitives-update-0/577 +# for additional context. +# +# Python Reference OpInfos should be added to the python_ref_db list below. +# Tests can opt-into running on these references by including +# that list in the Sequence they pass to the @ops decorator. +# +# When a Python Reference OpInfo is constructed a pointer to an +# existing OpInfo must be provided using the torch_opinfo_name kwarg. +# The existing OpInfo with that name and no variant will be found +# to inherit from. +# +# Instead of just inheriting the existing OpInfo's metadata, the +# Python Reference OpInfos inherit the existing OpInfo's +# construction arguments. 
These arguments can be overridden +# by adding kwargs to the constructor. + + +def _find_referenced_opinfo(referenced_name, variant_name, *, op_db=None): + """ + Finds the OpInfo with the given name that has no variant name. + """ + # NOTE: searching the global op_db doesn't work when OpInfos are split into + # different modules, as otherwise the op_db will not be fully constructed + # yet. So, instead the local op_db must be passed in explicitly. + if op_db is None: + from torch.testing._internal.common_methods_invocations import op_db + + for opinfo in op_db: + if opinfo.name == referenced_name and opinfo.variant_test_name == variant_name: + return opinfo + + +def _inherit_constructor_args(name, op, inherited, overrides): + # inherits metadata + common_kwargs = { + "name": name, + "op": op, + "aliases": None, # TODO add a check for alias coverage + "method_variant": None, + "inplace_variant": None, # TODO: add a check for inplace coverage + "supports_scripting": False, + } + + # Acquires inherited kwargs + kwargs = inherited.copy() + + # Fixes metadata + if "kwargs" in kwargs: + kwargs.update(kwargs["kwargs"]) + del kwargs["kwargs"] + if "self" in kwargs: + del kwargs["self"] + if "__class__" in kwargs: + del kwargs["__class__"] + if "skips" in kwargs: + del kwargs["skips"] + if "decorators" in kwargs: + del kwargs["decorators"] + + # Overrides metadata + kwargs.update(common_kwargs) + kwargs.update(overrides) + + # At the moment no prims support autograd, so we must not run autograd + # tests e.g. when testing dtype support. Once we start writing autograd + # formulas for prims this can be removed. + kwargs["supports_autograd"] = False + kwargs["supports_gradgrad"] = False + kwargs["supports_fwgrad_bwgrad"] = False + kwargs["supports_inplace_autograd"] = False + kwargs["supports_forward_ad"] = False + + return kwargs + + +class PythonRefInfo(OpInfo): + """ + An OpInfo for a Python reference of an OpInfo base class operation. 
+ """ + + def __init__( + self, + name, # the stringname of the callable Python reference + *, + op=None, # the function variant of the operation, populated as torch. if None + op_db=None, # The database of opinfos to search for the parent opinfo + torch_opinfo_name, # the string name of the corresponding torch opinfo + torch_opinfo_variant_name="", # the variant name for corresponding torch opinfo + validate_view_consistency=True, + **kwargs, + ): # additional kwargs override kwargs inherited from the torch opinfo + self.torch_opinfo_name = torch_opinfo_name + self.torch_opinfo_variant_name = torch_opinfo_variant_name + self.torch_opinfo = _find_referenced_opinfo( + torch_opinfo_name, torch_opinfo_variant_name, op_db=op_db + ) + self.validate_view_consistency = validate_view_consistency + assert isinstance(self.torch_opinfo, OpInfo) + + inherited = self.torch_opinfo._original_opinfo_args + ukwargs = _inherit_constructor_args(name, op, inherited, kwargs) + super().__init__(**ukwargs) + + +class ReductionPythonRefInfo(ReductionOpInfo): + """ + An OpInfo for a Python reference of an elementwise unary operation. + """ + + def __init__( + self, + name, # the stringname of the callable Python reference + *, + op=None, # the function variant of the operation, populated as torch. 
if None + op_db=None, # The database of opinfos to search for the parent opinfo + torch_opinfo_name, # the string name of the corresponding torch opinfo + torch_opinfo_variant_name="", # the variant name for corresponding torch opinfo + **kwargs, + ): # additional kwargs override kwargs inherited from the torch opinfo + self.torch_opinfo_name = torch_opinfo_name + self.torch_opinfo_variant_name = torch_opinfo_variant_name + self.torch_opinfo = _find_referenced_opinfo( + torch_opinfo_name, torch_opinfo_variant_name, op_db=op_db + ) + assert isinstance(self.torch_opinfo, ReductionOpInfo) + + inherited = self.torch_opinfo._original_reduction_args + ukwargs = _inherit_constructor_args(name, op, inherited, kwargs) + + # See https://github.com/pytorch/pytorch/issues/77216 + self.validate_view_consistency = False + + super().__init__(**ukwargs) + + +class ElementwiseUnaryPythonRefInfo(UnaryUfuncInfo): + """ + An OpInfo for a Python reference of an elementwise unary operation. + """ + + def __init__( + self, + name, # the stringname of the callable Python reference + *, + op=None, # the function variant of the operation, populated as torch. 
if None + op_db=None, # The database of opinfos to search for the parent opinfo + torch_opinfo_name, # the string name of the corresponding torch opinfo + torch_opinfo_variant_name="", # the variant name for corresponding torch opinfo + validate_view_consistency=True, + **kwargs, + ): # additional kwargs override kwargs inherited from the torch opinfo + self.torch_opinfo_name = torch_opinfo_name + self.torch_opinfo_variant_name = torch_opinfo_variant_name + self.torch_opinfo = _find_referenced_opinfo( + torch_opinfo_name, torch_opinfo_variant_name, op_db=op_db + ) + self.validate_view_consistency = validate_view_consistency + assert isinstance(self.torch_opinfo, UnaryUfuncInfo) + + inherited = self.torch_opinfo._original_unary_ufunc_args + ukwargs = _inherit_constructor_args(name, op, inherited, kwargs) + + super().__init__(**ukwargs) + + +class ElementwiseBinaryPythonRefInfo(BinaryUfuncInfo): + """ + An OpInfo for a Python reference of an elementwise binary operation. + """ + + def __init__( + self, + name, # the stringname of the callable Python reference + *, + op=None, # the function variant of the operation, populated as torch. 
if None + op_db=None, # The database of opinfos to search for the parent opinfo + torch_opinfo_name, # the string name of the corresponding torch opinfo + torch_opinfo_variant_name="", # the variant name for corresponding torch opinfo + **kwargs, + ): # additional kwargs override kwargs inherited from the torch opinfo + self.torch_opinfo_name = torch_opinfo_name + self.torch_opinfo_variant_name = torch_opinfo_variant_name + self.torch_opinfo = _find_referenced_opinfo( + torch_opinfo_name, torch_opinfo_variant_name, op_db=op_db + ) + assert isinstance(self.torch_opinfo, BinaryUfuncInfo) + + inherited = self.torch_opinfo._original_binary_ufunc_args + ukwargs = _inherit_constructor_args(name, op, inherited, kwargs) + + super().__init__(**ukwargs) diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/opinfo/utils.py b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/opinfo/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..41973dc2c0518a55bb8e3ac664303fec1ce481f9 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/opinfo/utils.py @@ -0,0 +1,273 @@ +# mypy: ignore-errors + +import collections +import warnings +from functools import partial, wraps +from typing import Sequence + +import numpy as np + +import torch +from torch.testing._internal.common_cuda import TEST_CUDA +from torch.testing._internal.common_dtype import ( + _dispatch_dtypes, + all_types, + all_types_and, + all_types_and_complex, + all_types_and_complex_and, + all_types_and_half, + complex_types, + floating_and_complex_types, + floating_and_complex_types_and, + floating_types, + floating_types_and, + floating_types_and_half, + integral_types, + integral_types_and, +) +from torch.testing._internal.common_utils import torch_to_numpy_dtype_dict + + +COMPLETE_DTYPES_DISPATCH = ( + all_types, + all_types_and_complex, + all_types_and_half, + floating_types, + floating_and_complex_types, + 
floating_types_and_half, + integral_types, + complex_types, +) + +EXTENSIBLE_DTYPE_DISPATCH = ( + all_types_and_complex_and, + floating_types_and, + floating_and_complex_types_and, + integral_types_and, + all_types_and, +) + +# Better way to acquire devices? +DEVICES = ["cpu"] + (["cuda"] if TEST_CUDA else []) + + +class _dynamic_dispatch_dtypes(_dispatch_dtypes): + # Class to tag the dynamically generated types. + pass + + +def get_supported_dtypes(op, sample_inputs_fn, device_type): + # Returns the supported dtypes for the given operator and device_type pair. + assert device_type in ["cpu", "cuda"] + if not TEST_CUDA and device_type == "cuda": + warnings.warn( + "WARNING: CUDA is not available, empty_dtypes dispatch will be returned!" + ) + return _dynamic_dispatch_dtypes(()) + + supported_dtypes = set() + for dtype in all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half): + try: + samples = sample_inputs_fn(op, device_type, dtype, False) + except RuntimeError: + # If `sample_inputs_fn` doesn't support sampling for a given + # `dtype`, we assume that the `dtype` is not supported. + # We raise a warning, so that user knows that this was the case + # and can investigate if there was an issue with the `sample_inputs_fn`. + warnings.warn( + f"WARNING: Unable to generate sample for device:{device_type} and dtype:{dtype}" + ) + continue + + # We assume the dtype is supported + # only if all samples pass for the given dtype. + supported = True + for sample in samples: + try: + op(sample.input, *sample.args, **sample.kwargs) + except RuntimeError as re: + # dtype is not supported + supported = False + break + + if supported: + supported_dtypes.add(dtype) + + return _dynamic_dispatch_dtypes(supported_dtypes) + + +def dtypes_dispatch_hint(dtypes): + # Function returns the appropriate dispatch function (from COMPLETE_DTYPES_DISPATCH and EXTENSIBLE_DTYPE_DISPATCH) + # and its string representation for the passed `dtypes`. 
+ return_type = collections.namedtuple("return_type", "dispatch_fn dispatch_fn_str") + + # CUDA is not available, dtypes will be empty. + if len(dtypes) == 0: + return return_type((), "()") + + set_dtypes = set(dtypes) + for dispatch in COMPLETE_DTYPES_DISPATCH: + # Short circuit if we get an exact match. + if set(dispatch()) == set_dtypes: + return return_type(dispatch, dispatch.__name__ + "()") + + chosen_dispatch = None + chosen_dispatch_score = 0.0 + for dispatch in EXTENSIBLE_DTYPE_DISPATCH: + dispatch_dtypes = set(dispatch()) + if not dispatch_dtypes.issubset(set_dtypes): + continue + + score = len(dispatch_dtypes) + if score > chosen_dispatch_score: + chosen_dispatch_score = score + chosen_dispatch = dispatch + + # If user passed dtypes which are lower than the lowest + # dispatch type available (not likely but possible in code path). + if chosen_dispatch is None: + return return_type((), str(dtypes)) + + return return_type( + partial(dispatch, *tuple(set(dtypes) - set(dispatch()))), + dispatch.__name__ + str(tuple(set(dtypes) - set(dispatch()))), + ) + + +def is_dynamic_dtype_set(op): + # Detect if the OpInfo entry acquired dtypes dynamically + # using `get_supported_dtypes`. + return op.dynamic_dtypes + + +def str_format_dynamic_dtype(op): + fmt_str = f""" + OpInfo({op.name}, + dtypes={dtypes_dispatch_hint(op.dtypes).dispatch_fn_str}, + dtypesIfCUDA={dtypes_dispatch_hint(op.dtypesIfCUDA).dispatch_fn_str}, + ) + """ + + return fmt_str + + +def np_unary_ufunc_integer_promotion_wrapper(fn): + # Wrapper that passes PyTorch's default scalar + # type as an argument to the wrapped NumPy + # unary ufunc when given an integer input. + # This mimicks PyTorch's integer->floating point + # type promotion. + # + # This is necessary when NumPy promotes + # integer types to double, since PyTorch promotes + # integer types to the default scalar type. 
+ + # Helper to determine if promotion is needed + def is_integral(dtype): + return dtype in [ + np.bool_, + bool, + np.uint8, + np.int8, + np.int16, + np.int32, + np.int64, + ] + + @wraps(fn) + def wrapped_fn(x): + # As the default dtype can change, acquire it when function is called. + # NOTE: Promotion in PyTorch is from integer types to the default dtype + np_dtype = torch_to_numpy_dtype_dict[torch.get_default_dtype()] + + if is_integral(x.dtype): + return fn(x.astype(np_dtype)) + return fn(x) + + return wrapped_fn + + +def reference_reduction_numpy(f, supports_keepdims=True): + """Wraps a NumPy reduction operator. + + The wrapper function will forward dim, keepdim, mask, and identity + kwargs to the wrapped function as the NumPy equivalent axis, + keepdims, where, and initiak kwargs, respectively. + + Args: + f: NumPy reduction operator to wrap + supports_keepdims (bool, optional): Whether the NumPy operator accepts + keepdims parameter. If it does not, the wrapper will manually unsqueeze + the reduced dimensions if it was called with keepdim=True. Defaults to True. 
+ + Returns: + Wrapped function + + """ + + @wraps(f) + def wrapper(x: np.ndarray, *args, **kwargs): + # Copy keys into a set + keys = set(kwargs.keys()) + + dim = kwargs.pop("dim", None) + keepdim = kwargs.pop("keepdim", False) + + if "dim" in keys: + dim = tuple(dim) if isinstance(dim, Sequence) else dim + + # NumPy reductions don't accept dim=0 for scalar inputs + # so we convert it to None if and only if dim is equivalent + if x.ndim == 0 and dim in {0, -1, (0,), (-1,)}: + kwargs["axis"] = None + else: + kwargs["axis"] = dim + + if "keepdim" in keys and supports_keepdims: + kwargs["keepdims"] = keepdim + + if "mask" in keys: + mask = kwargs.pop("mask") + if mask is not None: + assert mask.layout == torch.strided + kwargs["where"] = mask.cpu().numpy() + + if "identity" in keys: + identity = kwargs.pop("identity") + if identity is not None: + if identity.dtype is torch.bfloat16: + identity = identity.cpu().to(torch.float32) + else: + identity = identity.cpu() + kwargs["initial"] = identity.numpy() + + result = f(x, *args, **kwargs) + + # Unsqueeze reduced dimensions if NumPy does not support keepdims + if keepdim and not supports_keepdims and x.ndim > 0: + dim = list(range(x.ndim)) if dim is None else dim + result = np.expand_dims(result, dim) + + return result + + return wrapper + + +def prod_numpy(a, *args, **kwargs): + """ + The function will call np.prod with type as np.int64 if the input type + is int or uint64 if is uint. This is necessary because windows np.prod uses by default + int32 while on linux it uses int64. 
+ This is for fixing integer overflow https://github.com/pytorch/pytorch/issues/77320 + + Returns: + np.prod of input + """ + if "dtype" not in kwargs: + if np.issubdtype(a.dtype, np.signedinteger): + a = a.astype(np.int64) + elif np.issubdtype(a.dtype, np.unsignedinteger): + a = a.astype(np.uint64) + + fn = reference_reduction_numpy(np.prod) + return fn(a, *args, **kwargs) diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/quantization_torch_package_models.py b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/quantization_torch_package_models.py new file mode 100644 index 0000000000000000000000000000000000000000..abc4ab6f7e4734361ec7ecea3d4755910f9cf2ab --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/quantization_torch_package_models.py @@ -0,0 +1,33 @@ +# mypy: ignore-errors + +import math + +import torch +import torch.nn as nn + + +class LinearReluFunctionalChild(nn.Module): + def __init__(self, N): + super().__init__() + self.w1 = nn.Parameter(torch.empty(N, N)) + self.b1 = nn.Parameter(torch.zeros(N)) + torch.nn.init.kaiming_uniform_(self.w1, a=math.sqrt(5)) + + def forward(self, x): + x = torch.nn.functional.linear(x, self.w1, self.b1) + x = torch.nn.functional.relu(x) + return x + +class LinearReluFunctional(nn.Module): + def __init__(self, N): + super().__init__() + self.child = LinearReluFunctionalChild(N) + self.w1 = nn.Parameter(torch.empty(N, N)) + self.b1 = nn.Parameter(torch.zeros(N)) + torch.nn.init.kaiming_uniform_(self.w1, a=math.sqrt(5)) + + def forward(self, x): + x = self.child(x) + x = torch.nn.functional.linear(x, self.w1, self.b1) + x = torch.nn.functional.relu(x) + return x diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/torchbind_impls.py b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/torchbind_impls.py new file mode 100644 index 
0000000000000000000000000000000000000000..ad728aa909744e860cff346b53073c8712b95367 --- /dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/torchbind_impls.py @@ -0,0 +1,133 @@ +# mypy: allow-untyped-defs +import contextlib +from typing import Optional + +import torch + + +_TORCHBIND_IMPLS_INITIALIZED = False + +_TENSOR_QUEUE_GLOBAL_TEST: Optional[torch.ScriptObject] = None + + +def init_torchbind_implementations(): + global _TORCHBIND_IMPLS_INITIALIZED + global _TENSOR_QUEUE_GLOBAL_TEST + if _TORCHBIND_IMPLS_INITIALIZED: + return + + load_torchbind_test_lib() + register_fake_operators() + register_fake_classes() + _TENSOR_QUEUE_GLOBAL_TEST = _empty_tensor_queue() + _TORCHBIND_IMPLS_INITIALIZED = True + + +def _empty_tensor_queue() -> torch.ScriptObject: + return torch.classes._TorchScriptTesting._TensorQueue( + torch.empty( + 0, + ).fill_(-1) + ) + + +# put these under a function because the corresponding library might not be loaded yet. +def register_fake_operators(): + @torch.library.register_fake("_TorchScriptTesting::takes_foo_python_meta") + def fake_takes_foo(foo, z): + return foo.add_tensor(z) + + @torch.library.register_fake("_TorchScriptTesting::queue_pop") + def fake_queue_pop(tq): + return tq.pop() + + @torch.library.register_fake("_TorchScriptTesting::queue_push") + def fake_queue_push(tq, x): + return tq.push(x) + + @torch.library.register_fake("_TorchScriptTesting::queue_size") + def fake_queue_size(tq): + return tq.size() + + def meta_takes_foo_list_return(foo, x): + a = foo.add_tensor(x) + b = foo.add_tensor(a) + c = foo.add_tensor(b) + return [a, b, c] + + def meta_takes_foo_tuple_return(foo, x): + a = foo.add_tensor(x) + b = foo.add_tensor(a) + return (a, b) + + torch.ops._TorchScriptTesting.takes_foo_list_return.default.py_impl( + torch._C.DispatchKey.Meta + )(meta_takes_foo_list_return) + + torch.ops._TorchScriptTesting.takes_foo_tuple_return.default.py_impl( + torch._C.DispatchKey.Meta + 
)(meta_takes_foo_tuple_return) + + torch.ops._TorchScriptTesting.takes_foo.default.py_impl(torch._C.DispatchKey.Meta)( + # make signature match original cpp implementation to support kwargs + lambda foo, x: foo.add_tensor(x) + ) + + +def register_fake_classes(): + @torch._library.register_fake_class("_TorchScriptTesting::_Foo") + class FakeFoo: + def __init__(self, x: int, y: int): + self.x = x + self.y = y + + @classmethod + def __obj_unflatten__(cls, flattend_foo): + return cls(**dict(flattend_foo)) + + def add_tensor(self, z): + return (self.x + self.y) * z + + @torch._library.register_fake_class("_TorchScriptTesting::_ContainsTensor") + class FakeContainsTensor: + def __init__(self, t: torch.Tensor): + self.t = t + + @classmethod + def __obj_unflatten__(cls, flattend_foo): + return cls(**dict(flattend_foo)) + + def get(self): + return self.t + + +def load_torchbind_test_lib(): + import unittest + + from torch.testing._internal.common_utils import ( # type: ignore[attr-defined] + find_library_location, + IS_FBCODE, + IS_MACOS, + IS_SANDCASTLE, + IS_WINDOWS, + ) + + if IS_SANDCASTLE or IS_FBCODE: + torch.ops.load_library("//caffe2/test/cpp/jit:test_custom_class_registrations") + elif IS_MACOS: + raise unittest.SkipTest("non-portable load_library call used in test") + else: + lib_file_path = find_library_location("libtorchbind_test.so") + if IS_WINDOWS: + lib_file_path = find_library_location("torchbind_test.dll") + torch.ops.load_library(str(lib_file_path)) + + +@contextlib.contextmanager +def _register_py_impl_temporarily(op_overload, key, fn): + try: + op_overload.py_impl(key)(fn) + yield + finally: + del op_overload.py_kernels[key] + op_overload._dispatch_cache.clear() diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/triton_utils.py b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/triton_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..d3a8065f294047627e5a82941d4176d1e4da1cb0 --- 
/dev/null +++ b/infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/triton_utils.py @@ -0,0 +1,455 @@ +# mypy: ignore-errors + +import unittest + +from torch.testing._internal.inductor_utils import HAS_CUDA, HAS_GPU +from torch.utils._triton import has_triton + + +requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda") +requires_gpu = unittest.skipUnless(HAS_GPU, "requires gpu") + +if has_triton(): + import triton + from triton import language as tl + + # Define here so that multiple tests can take advantage of it + @triton.jit + def add_kernel( + in_ptr0, + in_ptr1, + out_ptr, + n_elements, + BLOCK_SIZE: "tl.constexpr", + ): + pid = tl.program_id(axis=0) + block_start = pid * BLOCK_SIZE + offsets = block_start + tl.arange(0, BLOCK_SIZE) + mask = offsets < n_elements + x = tl.load(in_ptr0 + offsets, mask=mask) + y = tl.load(in_ptr1 + offsets, mask=mask) + output = x + y + tl.store(out_ptr + offsets, output, mask=mask) + + @triton.jit + def add_kernel_with_optional_param( + in_ptr0, + in_ptr1, + out_ptr, + n_elements, + ARGS_PASSED: "tl.constexpr", + BLOCK_SIZE: "tl.constexpr", + ): + pid = tl.program_id(axis=0) + block_start = pid * BLOCK_SIZE + offsets = block_start + tl.arange(0, BLOCK_SIZE) + mask = offsets < n_elements + x = tl.load(in_ptr0 + offsets, mask=mask) + if ARGS_PASSED == "two": + y = tl.load(in_ptr1 + offsets, mask=mask) + output = x + y + else: + output = x + tl.store(out_ptr + offsets, output, mask=mask) + + @triton.autotune( + configs=[ + triton.Config({"BLOCK_SIZE": 128}, num_stages=3, num_warps=8), + triton.Config({"BLOCK_SIZE": 128}, num_stages=4, num_warps=4), + triton.Config({"BLOCK_SIZE": 64}, num_stages=3, num_warps=8), + triton.Config({"BLOCK_SIZE": 64}, num_stages=4, num_warps=4), + ], + key=[], + ) + @triton.jit + def add_kernel_autotuned( + in_ptr0, + in_ptr1, + out_ptr, + n_elements, + BLOCK_SIZE: "tl.constexpr", + ): + pid = tl.program_id(axis=0) + block_start = pid * BLOCK_SIZE + offsets = block_start + 
tl.arange(0, BLOCK_SIZE) + mask = offsets < n_elements + x = tl.load(in_ptr0 + offsets, mask=mask) + y = tl.load(in_ptr1 + offsets, mask=mask) + output = x + y + tl.store(out_ptr + offsets, output, mask=mask) + + @triton.autotune( + configs=[ + triton.Config({"BLOCK_SIZE": 16}, num_stages=2, num_warps=2), + ], + key=[], + ) + @triton.jit + def add_kernel_autotuned_weird_param_order( + in_ptr0, + in_ptr1, + n_elements, + BLOCK_SIZE: "tl.constexpr", + out_ptr, + ): + # out_ptr is after an autotuned param that's declared as tl.constexpr. + # This param ordering can create bugs if not handled correctly. + pid = tl.program_id(axis=0) + block_start = pid * BLOCK_SIZE + offsets = block_start + tl.arange(0, BLOCK_SIZE) + mask = offsets < n_elements + x = tl.load(in_ptr0 + offsets, mask=mask) + y = tl.load(in_ptr1 + offsets, mask=mask) + output = x + y + tl.store(out_ptr + offsets, output, mask=mask) + + @triton.autotune( + configs=[ + triton.Config( + {"BLOCK_SIZE_X": 128, "BLOCK_SIZE_Y": 128}, num_stages=3, num_warps=8 + ), + triton.Config( + {"BLOCK_SIZE_X": 128, "BLOCK_SIZE_Y": 128}, num_stages=4, num_warps=4 + ), + triton.Config( + {"BLOCK_SIZE_X": 64, "BLOCK_SIZE_Y": 64}, num_stages=3, num_warps=8 + ), + triton.Config( + {"BLOCK_SIZE_X": 64, "BLOCK_SIZE_Y": 64}, num_stages=4, num_warps=4 + ), + ], + key=[], + ) + @triton.jit + def add_kernel_2d_autotuned( + in_ptr0, + in_ptr1, + out_ptr, + x_elements, + y_elements, + BLOCK_SIZE_X: "tl.constexpr", + BLOCK_SIZE_Y: "tl.constexpr", + ): + xoffset = tl.program_id(0) * BLOCK_SIZE_X + xindex = xoffset + tl.arange(0, BLOCK_SIZE_X)[:, None] + xmask = xindex < x_elements + yoffset = tl.program_id(1) * BLOCK_SIZE_Y + yindex = yoffset + tl.arange(0, BLOCK_SIZE_Y)[None, :] + ymask = yindex < y_elements + x1 = xindex + y0 = yindex + tmp0 = tl.load(in_ptr0 + (x1 + (x_elements * y0)), xmask & ymask) + tmp1 = tl.load(in_ptr0 + (y0 + (y_elements * x1)), xmask & ymask) + tmp2 = tmp0 + tmp1 + tl.store(out_ptr + (x1 + (x_elements * y0)), 
tmp2, xmask & ymask) + + def _dummy_early_config_prune(configs, *_, **__): + return configs + + @triton.autotune( + configs=[ + triton.Config({"BLOCK_SIZE": 128}, num_stages=3, num_warps=8), + triton.Config({"BLOCK_SIZE": 64}, num_stages=4, num_warps=4), + ], + key=[], + warmup=10, + rep=20, + prune_configs_by={"early_config_prune": _dummy_early_config_prune}, + ) + @triton.jit + def add_kernel_autotuned_with_unsupported_args( + in_ptr0, + in_ptr1, + out_ptr, + n_elements, + BLOCK_SIZE: "tl.constexpr", + ): + pid = tl.program_id(axis=0) + block_start = pid * BLOCK_SIZE + offsets = block_start + tl.arange(0, BLOCK_SIZE) + mask = offsets < n_elements + x = tl.load(in_ptr0 + offsets, mask=mask) + y = tl.load(in_ptr1 + offsets, mask=mask) + output = x + y + tl.store(out_ptr + offsets, output, mask=mask) + + @triton.jit + def add_kernel_with_scaling( + in_ptr0, + in_ptr1, + out_ptr, + n_elements, + scaling_factor, + BLOCK_SIZE: "tl.constexpr", + ): + pid = tl.program_id(axis=0) + block_start = pid * BLOCK_SIZE + offsets = block_start + tl.arange(0, BLOCK_SIZE) + mask = offsets < n_elements + x = tl.load(in_ptr0 + offsets, mask=mask) + y = tl.load(in_ptr1 + offsets, mask=mask) + output = (x + y) * scaling_factor + tl.store(out_ptr + offsets, output, mask=mask) + + @triton.jit + def mul2_kernel( + in_ptr0, + out_ptr, + n_elements, + BLOCK_SIZE: "tl.constexpr", + ): + pid = tl.program_id(axis=0) + block_start = pid * BLOCK_SIZE + offsets = block_start + tl.arange(0, BLOCK_SIZE) + mask = offsets < n_elements + x = tl.load(in_ptr0 + offsets, mask=mask) + output = 2 * x + tl.store(out_ptr + offsets, output, mask=mask) + + @triton.jit + def mul2_inplace_kernel( + ptr, + n_elements, + BLOCK_SIZE: "tl.constexpr", + ): + pid = tl.program_id(axis=0) + block_start = pid * BLOCK_SIZE + offsets = block_start + tl.arange(0, BLOCK_SIZE) + mask = offsets < n_elements + x = tl.load(ptr + offsets, mask=mask) + output = 2 * x + tl.store(ptr + offsets, output, mask=mask) + + @triton.jit + 
def zero_negs(x):
    # Elementwise clamp of negatives to zero; @triton.jit decorator is on the
    # preceding line of the file.
    return tl.where(x >= 0, x, 0)


@triton.jit
def indirection_kernel(
    in_ptr0,
    out_ptr,
    n_elements,
    BLOCK_SIZE: "tl.constexpr",
    ACTIVATION: "tl.constexpr",
):
    # Dispatches to a sibling kernel selected by the constexpr ACTIVATION name,
    # then copies the (possibly mutated) input to the output.
    prog = tl.program_id(axis=0)
    offs = prog * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
    in_bounds = offs < n_elements
    if ACTIVATION == "mul2_inplace_kernel":
        mul2_inplace_kernel(in_ptr0, n_elements, BLOCK_SIZE=BLOCK_SIZE)
    elif ACTIVATION == "add_kernel":
        add_kernel(in_ptr0, in_ptr0, out_ptr, n_elements, BLOCK_SIZE=BLOCK_SIZE)
    val = tl.load(in_ptr0 + offs, mask=in_bounds)
    tl.store(out_ptr + offs, val, mask=in_bounds)


@triton.jit
def double_strided_kernel(
    in_ptr,
    out_ptr,
    in_y_stride,
    out_y_stride,
    X_BLOCK_SIZE: "tl.constexpr",
    Y_BLOCK_SIZE: "tl.constexpr",
):
    # Copies a 2-D tile while doubling each element; input and output may use
    # different row strides.
    cols = tl.program_id(axis=0) * X_BLOCK_SIZE + tl.arange(0, X_BLOCK_SIZE)
    rows = tl.program_id(axis=1) * Y_BLOCK_SIZE + tl.arange(0, Y_BLOCK_SIZE)
    src_idx = rows[:, None] * in_y_stride + cols[None, :]
    dst_idx = rows[:, None] * out_y_stride + cols[None, :]
    tile = tl.load(in_ptr + src_idx)
    tl.store(out_ptr + dst_idx, tile * 2.0)


@triton.jit
def inline_asm_kernel(X, Y, Z, n: "tl.constexpr", BLOCK: "tl.constexpr"):
    # Applies the PTX `shf.l.wrap.b32` instruction (funnel shift of x:y by s)
    # elementwise via inline asm and writes the result to Z.
    x = tl.load(X + tl.arange(0, BLOCK))
    y = tl.load(Y + tl.arange(0, BLOCK))
    s = tl.full([BLOCK], n, tl.int32)
    shifted = tl.inline_asm_elementwise(
        "shf.l.wrap.b32 $0, $1, $2, $3;",
        "=r,r, r, r",
        [x, y, s],
        dtype=tl.int32,
        is_pure=True,
        pack=1,
    )
    tl.store(Z + tl.arange(0, BLOCK), shifted)


@triton.jit
def add_kernel_with_block_ptr(
    x_ptr,
    y_ptr,
    output_ptr,
    n_elements,
    BLOCK_SIZE: tl.constexpr,
):
    # Elementwise add expressed through block pointers + boundary_check rather
    # than raw offset arithmetic with masks.
    start = tl.program_id(axis=0) * BLOCK_SIZE
    x_block = tl.make_block_ptr(
        base=x_ptr,
        shape=[n_elements],
        strides=[1],
        offsets=[start],
        block_shape=[BLOCK_SIZE],
        order=[0],
    )
    y_block = tl.make_block_ptr(
        base=y_ptr,
        shape=[n_elements],
        strides=[1],
        offsets=[start],
        block_shape=[BLOCK_SIZE],
        order=[0],
    )
    out_block = tl.make_block_ptr(
        base=output_ptr,
        shape=[n_elements],
        strides=[1],
        offsets=[start],
        block_shape=[BLOCK_SIZE],
        order=[0],
    )
    x = tl.load(x_block, boundary_check=[0])
    y = tl.load(y_block, boundary_check=[0])
    tl.store(out_block, x + y, boundary_check=[0])


@triton.jit
def kernel_with_block_ptr_2d(
    x_ptr,
    output_ptr,
    n_elements,
    BLOCK_SIZE: tl.constexpr,
):
    # Copies input to output through 2-D block pointers (second dim has size 1;
    # only dim 0 is boundary-checked).
    start = tl.program_id(axis=0) * BLOCK_SIZE
    src = tl.make_block_ptr(
        base=x_ptr,
        shape=[n_elements, 1],
        strides=[1, 1],
        offsets=[start, 0],
        block_shape=[BLOCK_SIZE, 1],
        order=[1, 0],
    )
    dst = tl.make_block_ptr(
        base=output_ptr,
        shape=[n_elements, 1],
        strides=[1, 1],
        offsets=[start, 0],
        block_shape=[BLOCK_SIZE, 1],
        order=[1, 0],
    )
    vals = tl.load(src, boundary_check=[0])
    tl.store(dst, vals, boundary_check=[0])


from triton.language import load, store


@triton.jit
def add_kernel_with_import(
    in_ptr0,
    in_ptr1,
    out_ptr,
    n_elements,
    BLOCK_SIZE: "tl.constexpr",
):
    # Same add kernel, but using names imported directly from triton.language.
    prog = tl.program_id(axis=0)
    offs = prog * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
    in_bounds = offs < n_elements
    lhs = load(in_ptr0 + offs, mask=in_bounds)
    rhs = load(in_ptr1 + offs, mask=in_bounds)
    store(out_ptr + offs, lhs + rhs, mask=in_bounds)


@triton.jit
def cond_op_kernel(
    in_ptr0,
    in_ptr1,
    out_ptr,
    n_elements,
    BLOCK_SIZE: "tl.constexpr",
):
    # Program 0 writes x + y; every other program writes x * y.
    prog = tl.program_id(axis=0)
    offs = prog * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
    in_bounds = offs < n_elements
    lhs = tl.load(in_ptr0 + offs, mask=in_bounds)
    rhs = tl.load(in_ptr1 + offs, mask=in_bounds)
    if tl.program_id(0) == 0:
        result = lhs + rhs
    else:
        result = lhs * rhs
    tl.store(out_ptr + offs, result, mask=in_bounds)


@triton.jit
def atomic_add_kernel(
    in_ptr0,
    in_ptr1,
    out_ptr,
    n_elements,
    BLOCK_SIZE: "tl.constexpr",
):
    # Accumulates x + y into out_ptr with an atomic add.
    prog = tl.program_id(axis=0)
    offs = prog * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
    in_bounds = offs < n_elements
    lhs = tl.load(in_ptr0 + offs, mask=in_bounds)
    rhs = tl.load(in_ptr1 + offs, mask=in_bounds)
    tl.atomic_add(out_ptr + offs, lhs + rhs, mask=in_bounds)


@triton.jit
def add_4_times_kernel(
    in_ptr0,
    in_ptr1,
    out_ptr,
    n_elements,
    BLOCK_SIZE: "tl.constexpr",
):
    # Stores x + y four times — twice via `for`, twice via `while` — so tests
    # can exercise both loop constructs in the Triton frontend.
    prog = tl.program_id(axis=0)
    offs = prog * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
    in_bounds = offs < n_elements
    lhs = tl.load(in_ptr0 + offs, mask=in_bounds)
    rhs = tl.load(in_ptr1 + offs, mask=in_bounds)
    for _i in range(2):
        total = lhs + rhs
        tl.store(out_ptr + offs, total, mask=in_bounds)
    remaining = 2
    while remaining > 0:
        remaining -= 1
        total = lhs + rhs
        tl.store(out_ptr + offs, total, mask=in_bounds)


@triton.jit
def add_kernel_out_of_order_fn2(
    in_ptr0,
    in_ptr1,
    n_elements,
    out_ptr,
    BLOCK_SIZE: "tl.constexpr",
):
    # out_ptr deliberately appears after n_elements to test argument-order
    # handling in callers.
    prog = tl.program_id(axis=0)
    offs = prog * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
    in_bounds = offs < n_elements
    lhs = tl.load(in_ptr0 + offs, mask=in_bounds)
    rhs = tl.load(in_ptr1 + offs, mask=in_bounds)
    tl.store(out_ptr + offs, lhs + rhs, mask=in_bounds)


# ---- file boundary (from the collapsed diff): new file ----
# ---- torch/testing/_internal/two_tensor.py ----
# mypy: ignore-errors

import torch
import torch.utils._pytree as pytree
from torch.utils._python_dispatch import return_and_correct_aliasing


# A simple tensor subclass that holds two tensors internally, and runs every op
# on both tensors.
tensors. +class TwoTensor(torch.Tensor): + @staticmethod + def __new__(cls, a, b): + assert ( + a.device == b.device + and a.layout == b.layout + and a.requires_grad == b.requires_grad + and a.dtype == b.dtype + ) + # I guess it would be more accurate to represent the shape as torch.cat(a, b).shape + shape = a.shape + kwargs = {} + kwargs["strides"] = a.stride() + kwargs["storage_offset"] = a.storage_offset() + kwargs["device"] = a.device + kwargs["layout"] = a.layout + kwargs["requires_grad"] = a.requires_grad + kwargs["dtype"] = a.dtype + out = torch.Tensor._make_wrapper_subclass(cls, shape, **kwargs) + + assert a.shape == b.shape + assert a.stride() == b.stride() + assert a.storage_offset() == b.storage_offset() + return out + + def __init__(self, a, b): + self.a = a + self.b = b + + def __repr__(self): + a_repr = repr(self.a) + b_repr = repr(self.b) + return f"TwoTensor({a_repr}, {b_repr})" + + def __tensor_flatten__(self): + return ["a", "b"], None + + @staticmethod + def __tensor_unflatten__(inner_tensors, meta, outer_size, outer_stride): + assert meta is None + a, b = inner_tensors["a"], inner_tensors["b"] + return TwoTensor(a, b) + + @classmethod + def __torch_dispatch__(cls, func, types, args, kwargs): + if kwargs is None: + kwargs = {} + args_a = pytree.tree_map_only(TwoTensor, lambda x: x.a, args) + args_b = pytree.tree_map_only(TwoTensor, lambda x: x.b, args) + + kwargs_a = pytree.tree_map_only(TwoTensor, lambda x: x.a, kwargs) + kwargs_b = pytree.tree_map_only(TwoTensor, lambda x: x.b, kwargs) + + out_a = func(*args_a, **kwargs_a) + out_b = func(*args_b, **kwargs_b) + out_a_flat, spec = pytree.tree_flatten(out_a) + out_b_flat = pytree.tree_leaves(out_b) + # for aten ops that return non-tensors, just assume that + # our two inner tensors return the same value + out_flat = [ + TwoTensor(o_a, o_b) if isinstance(o_a, torch.Tensor) else o_a + for o_a, o_b in zip(out_a_flat, out_b_flat) + ] + out = pytree.tree_unflatten(out_flat, spec) + from 
torch._higher_order_ops.cond import cond_op + + if func is cond_op: + return out + else: + return return_and_correct_aliasing(func, args, kwargs, out) + + +class TwoTensorMode(torch.utils._python_dispatch.TorchDispatchMode): + def __torch_dispatch__(self, func, types, args=(), kwargs=None): + out = func(*args, **kwargs) + if torch._subclasses.fake_tensor._is_tensor_constructor(func): + out = TwoTensor(out, out.clone()) + return out