ZTWHHH committed on
Commit
5bade23
·
verified ·
1 Parent(s): 0b02fd3

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +2 -0
  2. infer_4_47_1/lib/python3.10/site-packages/msgpack/_cmsgpack.cpython-310-x86_64-linux-gnu.so +3 -0
  3. infer_4_47_1/lib/python3.10/site-packages/torch/_refs/__pycache__/__init__.cpython-310.pyc +3 -0
  4. infer_4_47_1/lib/python3.10/site-packages/torch/contrib/__pycache__/_tensorboard_vis.cpython-310.pyc +0 -0
  5. infer_4_47_1/lib/python3.10/site-packages/torch/contrib/_tensorboard_vis.py +143 -0
  6. infer_4_47_1/lib/python3.10/site-packages/torch/quantization/__init__.py +86 -0
  7. infer_4_47_1/lib/python3.10/site-packages/torch/quantization/__pycache__/fuse_modules.cpython-310.pyc +0 -0
  8. infer_4_47_1/lib/python3.10/site-packages/torch/quantization/_numeric_suite.py +28 -0
  9. infer_4_47_1/lib/python3.10/site-packages/torch/quantization/_numeric_suite_fx.py +26 -0
  10. infer_4_47_1/lib/python3.10/site-packages/torch/quantization/_quantized_conversions.py +133 -0
  11. infer_4_47_1/lib/python3.10/site-packages/torch/quantization/fuser_method_mappings.py +15 -0
  12. infer_4_47_1/lib/python3.10/site-packages/torch/quantization/quantization_mappings.py +29 -0
  13. infer_4_47_1/lib/python3.10/site-packages/torch/quantization/utils.py +29 -0
  14. infer_4_47_1/lib/python3.10/site-packages/torch/testing/__init__.py +5 -0
  15. infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/__init__.py +0 -0
  16. infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/autocast_test_lists.py +474 -0
  17. infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/autograd_function_db.py +635 -0
  18. infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/check_kernel_launches.py +165 -0
  19. infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/codegen/__init__.py +1 -0
  20. infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/codegen/__pycache__/__init__.cpython-310.pyc +0 -0
  21. infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/common_dist_composable.py +111 -0
  22. infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/common_distributed.py +1422 -0
  23. infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/common_fsdp.py +1532 -0
  24. infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/common_jit.py +323 -0
  25. infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/common_methods_invocations.py +0 -0
  26. infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/common_modules.py +0 -0
  27. infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/common_quantized.py +227 -0
  28. infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/composite_compliance.py +581 -0
  29. infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/custom_op_db.py +586 -0
  30. infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/custom_tensor.py +67 -0
  31. infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/dist_utils.py +200 -0
  32. infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/__init__.py +1 -0
  33. infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/__init__.py +98 -0
  34. infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/__pycache__/__init__.cpython-310.pyc +0 -0
  35. infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/__pycache__/_test_ops_common.cpython-310.pyc +0 -0
  36. infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/_test_ops_common.py +136 -0
  37. infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/_test_st_common.py +66 -0
  38. infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/test_common.py +42 -0
  39. infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/checkpoint_utils.py +51 -0
  40. infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/distributed_test.py +0 -0
  41. infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/fake_pg.py +31 -0
  42. infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/multi_threaded_pg.py +543 -0
  43. infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/__init__.py +0 -0
  44. infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/api/__init__.py +0 -0
  45. infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/api/__pycache__/__init__.cpython-310.pyc +0 -0
  46. infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc_utils.py +181 -0
  47. infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/generated/annotated_fn_args.py +0 -0
  48. infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/hop_db.py +266 -0
  49. infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/jit_metaprogramming_utils.py +722 -0
  50. infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/jit_utils.py +893 -0
.gitattributes CHANGED
@@ -1532,3 +1532,5 @@ janus/lib/libasan.so.6.0.0 filter=lfs diff=lfs merge=lfs -text
1532
  janus/lib/libssl.so filter=lfs diff=lfs merge=lfs -text
1533
  janus/lib/libgcc_s.so.1 filter=lfs diff=lfs merge=lfs -text
1534
  infer_4_37_2/lib/python3.10/site-packages/decord/libdecord.so filter=lfs diff=lfs merge=lfs -text
 
 
 
1532
  janus/lib/libssl.so filter=lfs diff=lfs merge=lfs -text
1533
  janus/lib/libgcc_s.so.1 filter=lfs diff=lfs merge=lfs -text
1534
  infer_4_37_2/lib/python3.10/site-packages/decord/libdecord.so filter=lfs diff=lfs merge=lfs -text
1535
+ infer_4_47_1/lib/python3.10/site-packages/torch/_refs/__pycache__/__init__.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1536
+ infer_4_47_1/lib/python3.10/site-packages/msgpack/_cmsgpack.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
infer_4_47_1/lib/python3.10/site-packages/msgpack/_cmsgpack.cpython-310-x86_64-linux-gnu.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2f87ce97d46442abda5490c80742c2ce65e1c06782a94d0a03f60c4e678c5649
3
+ size 1188120
infer_4_47_1/lib/python3.10/site-packages/torch/_refs/__pycache__/__init__.cpython-310.pyc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c1f07687d3874ad47493ac795f6d7e7a86815e211a1892f75efdf76ac80b2b55
3
+ size 142787
infer_4_47_1/lib/python3.10/site-packages/torch/contrib/__pycache__/_tensorboard_vis.cpython-310.pyc ADDED
Binary file (5.3 kB). View file
 
infer_4_47_1/lib/python3.10/site-packages/torch/contrib/_tensorboard_vis.py ADDED
@@ -0,0 +1,143 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import time
3
+ from collections import defaultdict
4
+ from functools import partial
5
+ from typing import DefaultDict
6
+
7
+ import torch
8
+
9
+
10
+ # Unfortunately it doesn't seem as if there was any way to get TensorBoard to do
11
+ # anything without having TF installed, and so this file has a hard dependency on it
12
+ # as well. It really is a debugging tool, so it doesn't matter.
13
+ try:
14
+ from tensorflow.core.util import event_pb2
15
+ from tensorflow.core.framework import graph_pb2
16
+ from tensorflow.python.summary.writer.writer import FileWriter
17
+ except ImportError:
18
+ raise ImportError("TensorBoard visualization of GraphExecutors requires having "
19
+ "TensorFlow installed") from None
20
+
21
+
22
def dump_tensorboard_summary(graph_executor, logdir):
    """Serialize *graph_executor* into a TensorBoard event file under *logdir*."""
    with FileWriter(logdir) as writer:
        graph_proto = visualize(graph_executor)
        serialized = graph_proto.SerializeToString()
        event = event_pb2.Event(wall_time=time.time(), graph_def=serialized)
        writer.add_event(event)
27
+
28
+
29
def visualize(graph, name_prefix='', pb_graph=None, executors_it=None):
    """Visualizes an independent graph, or a graph executor.

    Appends nodes describing *graph* to *pb_graph* (a TensorFlow ``GraphDef``),
    creating a fresh one when none is supplied, and returns it.
    """
    pb_graph = pb_graph or graph_pb2.GraphDef()
    value_map = {}

    # Graph-executor state is rendered by a dedicated helper, which inlines
    # nested graphs by calling back into this function with the same protobuf.
    if isinstance(graph, torch._C.GraphExecutorState):
        visualize_graph_executor(graph, name_prefix, pb_graph,
                                 partial(visualize, pb_graph=pb_graph))
        return pb_graph

    # Synthetic input node: one output slot per graph parameter.
    pb_graph.node.add(op='input', name=name_prefix + 'input')
    for slot, value in enumerate(graph.param_node().outputs()):
        value_map[value.unique()] = name_prefix + 'input:' + str(slot)

    visualize_rec(graph, value_map, name_prefix, pb_graph, executors_it)

    # Synthetic output node gathering every value the graph returns.
    sink = pb_graph.node.add(op='output', name=name_prefix + 'output')
    for value in graph.return_node().inputs():
        sink.input.append(value_map[value.unique()])

    return pb_graph
52
+
53
+
54
def visualize_graph_executor(state, name_prefix, pb_graph, inline_graph):
    """Append the state of a given GraphExecutor to the graph protobuf.

    Args:
        state (GraphExecutor or GraphExecutorState): GraphExecutor to display.
        name_prefix (str): Name prefix of the containing subgraph.
        pb_graph (GraphDef): graph to append to.
        inline_graph (Callable): a function that handles setting up a value_map,
            so that some graphs in here can be inlined. This is necessary, because
            this will simply be `visualize` for the top-level GraphExecutor,
            or `inline_graph` for all nested ones.

            The signature should look like (Graph, name_prefix) -> ().
            It will be called exactly once.

    The strategy is to embed all different configurations as independent subgraphs,
    while inlining the original graph as the one that actually produces the values.
    """
    # The autograd fallback graph, when present, becomes its own subgraph;
    # its nested executors are threaded through via `executors_it`.
    if state.autograd_fallback_graph is not None:
        visualize(graph=state.autograd_fallback_graph,
                  name_prefix=name_prefix + 'autograd_fallback/',
                  pb_graph=pb_graph,
                  executors_it=iter(state.autograd_fallback.executors()))

    # Each specialized execution plan (one per observed argument spec) gets
    # an independent `planN/` subgraph.
    for i, (arg_spec, plan) in enumerate(state.execution_plans.items()):
        subgraph_name = name_prefix + f'plan{i}/'

        # Create a disconnected node that will keep information regarding the input
        # types of this trace. This is unfortunately a bit too verbose to be included
        # in the subgraph name.
        input_kinds = pb_graph.node.add(op='INPUT_KIND', name=subgraph_name)
        input_kinds.attr['inputs'].s = repr(arg_spec).encode('ascii')

        visualize(plan.graph, subgraph_name, pb_graph, iter(plan.code.executors()))

        # Show gradient as an independent subgraph of this plan
        if plan.grad_executor is not None:
            grad_subgraph_name = subgraph_name + 'grad/'
            visualize(plan.grad_executor, grad_subgraph_name, pb_graph)

    # The original (unspecialized) graph is inlined last so that it is the one
    # wired to the surrounding values; `inline_graph` is called exactly once.
    return inline_graph(state.graph, name_prefix + 'original/')
95
+
96
+
97
def visualize_rec(graph, value_map, name_prefix, pb_graph, executors_it=None):
    """Recursive part of visualize (basically skips setting up the input and output nodes).

    Mutates `value_map` (value.unique() -> protobuf node output name) and
    appends nodes for every node of `graph` to `pb_graph`.
    """
    # Render `subgraph` in place of `node`: its inputs alias the outer node's
    # input names, and its outputs are published back into the outer value_map.
    def inline_graph(subgraph, name, node):
        rec_value_map = {inp.unique(): value_map[val.unique()]
                         for inp, val in zip(subgraph.inputs(), node.inputs())}
        visualize_rec(graph=subgraph,
                      value_map=rec_value_map,
                      name_prefix=name,
                      pb_graph=pb_graph)
        for out, val in zip(subgraph.outputs(), node.outputs()):
            value_map[val.unique()] = rec_value_map[out.unique()]

    # Per-kind counter used to generate unique node names like `add_1`, `add_2`.
    op_id_counter: DefaultDict[str, int] = defaultdict(int)

    def name_for(node):
        # Strip the namespace prefix (e.g. 'aten::' or 'prim::') from the kind.
        kind = node.kind()[node.kind().index('::') + 2:]
        op_id_counter[kind] += 1
        return kind, name_prefix + kind + '_' + str(op_id_counter[kind])

    def add_fusion_group(node):
        op, name = name_for(node)
        inline_graph(node.g('Subgraph'), name + '/', node)

    def add_graph_executor(node):
        op, name = name_for(node)
        if executors_it is None:
            # No executor state available: fall through and render as a plain node.
            add_node(node)
        else:
            # Consume the next executor in order; callers must supply executors
            # in the same order the GraphExecutor nodes appear in `graph`.
            ge = next(executors_it)
            visualize_graph_executor(ge, name + '/', pb_graph,
                                     partial(inline_graph, node=node))

    def add_node(node):
        # Fusion groups and nested graph executors get special treatment;
        # everything else becomes a single protobuf node.
        if node.kind() == 'prim::FusionGroup':
            return add_fusion_group(node)
        elif node.kind() == 'prim::GraphExecutor':
            return add_graph_executor(node)
        op, name = name_for(node)
        pb_node = pb_graph.node.add(op=op, name=name)
        for value in node.inputs():
            pb_node.input.append(value_map[value.unique()])
        # TODO: handle attrs
        for i, value in enumerate(node.outputs()):
            value_map[value.unique()] = name + ':' + str(i)

    for node in graph.nodes():
        add_node(node)
infer_4_47_1/lib/python3.10/site-packages/torch/quantization/__init__.py ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ from .fake_quantize import * # noqa: F403
3
+ from .fuse_modules import fuse_modules
4
+ from .fuser_method_mappings import * # noqa: F403
5
+ from .observer import * # noqa: F403
6
+ from .qconfig import * # noqa: F403
7
+ from .quant_type import * # noqa: F403
8
+ from .quantization_mappings import * # noqa: F403
9
+ from .quantize import * # noqa: F403
10
+ from .quantize_jit import * # noqa: F403
11
+ from .stubs import * # noqa: F403
12
+
13
+
14
def default_eval_fn(model, calib_data):
    r"""Default evaluation function used to calibrate a model.

    Runs ``model`` once per batch of ``calib_data`` and discards the outputs;
    only the side effects of the forward passes (observer statistics) matter.

    Args:
        model: a callable (typically an ``nn.Module``) invoked as ``model(data)``.
        calib_data: an iterable of ``(data, target)`` pairs, e.g. a
            ``torch.utils.data.DataLoader``; the targets are ignored.
    """
    # Targets are unused during calibration, so bind them to `_`.
    for data, _ in calib_data:
        model(data)
21
+
22
+
23
+ __all__ = [
24
+ "QuantWrapper",
25
+ "QuantStub",
26
+ "DeQuantStub",
27
+ # Top level API for eager mode quantization
28
+ "quantize",
29
+ "quantize_dynamic",
30
+ "quantize_qat",
31
+ "prepare",
32
+ "convert",
33
+ "prepare_qat",
34
+ # Top level API for graph mode quantization on TorchScript
35
+ "quantize_jit",
36
+ "quantize_dynamic_jit",
37
+ "_prepare_ondevice_dynamic_jit",
38
+ "_convert_ondevice_dynamic_jit",
39
+ "_quantize_ondevice_dynamic_jit",
40
+ # Top level API for graph mode quantization on GraphModule(torch.fx)
41
+ # 'fuse_fx', 'quantize_fx', # TODO: add quantize_dynamic_fx
42
+ # 'prepare_fx', 'prepare_dynamic_fx', 'convert_fx',
43
+ "QuantType", # quantization type
44
+ # custom module APIs
45
+ "get_default_static_quant_module_mappings",
46
+ "get_static_quant_module_class",
47
+ "get_default_dynamic_quant_module_mappings",
48
+ "get_default_qat_module_mappings",
49
+ "get_default_qconfig_propagation_list",
50
+ "get_default_compare_output_module_list",
51
+ "get_quantized_operator",
52
+ "get_fuser_method",
53
+ # Sub functions for `prepare` and `swap_module`
54
+ "propagate_qconfig_",
55
+ "add_quant_dequant",
56
+ "swap_module",
57
+ "default_eval_fn",
58
+ # Observers
59
+ "ObserverBase",
60
+ "WeightObserver",
61
+ "HistogramObserver",
62
+ "observer",
63
+ "default_observer",
64
+ "default_weight_observer",
65
+ "default_placeholder_observer",
66
+ "default_per_channel_weight_observer",
67
+ # FakeQuantize (for qat)
68
+ "default_fake_quant",
69
+ "default_weight_fake_quant",
70
+ "default_fixed_qparams_range_neg1to1_fake_quant",
71
+ "default_fixed_qparams_range_0to1_fake_quant",
72
+ "default_per_channel_weight_fake_quant",
73
+ "default_histogram_fake_quant",
74
+ # QConfig
75
+ "QConfig",
76
+ "default_qconfig",
77
+ "default_dynamic_qconfig",
78
+ "float16_dynamic_qconfig",
79
+ "float_qparams_weight_only_qconfig",
80
+ # QAT utilities
81
+ "default_qat_qconfig",
82
+ "prepare_qat",
83
+ "quantize_qat",
84
+ # module transformations
85
+ "fuse_modules",
86
+ ]
infer_4_47_1/lib/python3.10/site-packages/torch/quantization/__pycache__/fuse_modules.cpython-310.pyc ADDED
Binary file (795 Bytes). View file
 
infer_4_47_1/lib/python3.10/site-packages/torch/quantization/_numeric_suite.py ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # flake8: noqa: F401
2
+ r"""
3
+ This file is in the process of migration to `torch/ao/quantization`, and
4
+ is kept here for compatibility while the migration process is ongoing.
5
+ If you are adding a new entry/functionality, please, add it to the
6
+ `torch/ao/ns/_numeric_suite.py`, while adding an import statement
7
+ here.
8
+ """
9
+
10
+ from torch.ao.ns._numeric_suite import (
11
+ _convert_tuple_to_list,
12
+ _dequantize_tensor_list,
13
+ _find_match,
14
+ _get_logger_dict_helper,
15
+ _is_identical_module_type,
16
+ compare_model_outputs,
17
+ compare_model_stub,
18
+ compare_weights,
19
+ get_logger_dict,
20
+ get_matching_activations,
21
+ Logger,
22
+ NON_LEAF_MODULE_TO_ADD_OBSERVER_ALLOW_LIST,
23
+ OutputLogger,
24
+ prepare_model_outputs,
25
+ prepare_model_with_stubs,
26
+ Shadow,
27
+ ShadowLogger,
28
+ )
infer_4_47_1/lib/python3.10/site-packages/torch/quantization/_numeric_suite_fx.py ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # flake8: noqa: F401
2
+ r"""
3
+ This file is in the process of migration to `torch/ao/quantization`, and
4
+ is kept here for compatibility while the migration process is ongoing.
5
+ If you are adding a new entry/functionality, please, add it to the
6
+ `torch/ao/ns/_numeric_suite_fx.py`, while adding an import statement
7
+ here.
8
+ """
9
+
10
+ from torch.ao.ns._numeric_suite_fx import (
11
+ _add_loggers_impl,
12
+ _add_loggers_one_model,
13
+ _add_shadow_loggers_impl,
14
+ _extract_logger_info_one_model,
15
+ _extract_weights_impl,
16
+ _extract_weights_one_model,
17
+ add_loggers,
18
+ add_shadow_loggers,
19
+ extend_logger_results_with_comparison,
20
+ extract_logger_info,
21
+ extract_shadow_logger_info,
22
+ extract_weights,
23
+ NSTracer,
24
+ OutputLogger,
25
+ RNNReturnType,
26
+ )
infer_4_47_1/lib/python3.10/site-packages/torch/quantization/_quantized_conversions.py ADDED
@@ -0,0 +1,133 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ import torch
3
+
4
+
5
+ # Pack pairs of int4 values into int8, in row major order; first int4
6
+ # value goes into lower order bits, and second int4 value into higher
7
+ # order bits of resulting int8 value.
8
def pack_int4_to_int8(weight):
    """Pack pairs of int4 values (held one per int8 element) into int8 bytes.

    In row-major order, the even-indexed column provides the low nibble and
    the odd-indexed column the high nibble of each packed byte, halving the
    number of columns.
    """
    assert weight.dim() == 2
    assert weight.shape[1] % 2 == 0
    assert weight.dtype == torch.int8
    low_nibbles = weight[:, 0::2] & 0xF
    high_nibbles = weight[:, 1::2] & 0xF
    return (high_nibbles << 4) | low_nibbles
13
+
14
+
15
+ # Unpack quadruples of bits in int8 values into int4 values, in row
16
+ # major order; the lower 4 bits go into the first int4 value, and the
17
+ # upper 4 bits go into the second int4 value.
18
def unpack_int8_to_int4(weight):
    """Split each int8 byte into two int4 values, held one per int8 element.

    In row-major order, the low nibble becomes the even-indexed output
    column and the high nibble the odd-indexed one, doubling the number
    of columns.
    """
    assert weight.dim() == 2
    assert weight.dtype == torch.int8
    rows, cols = weight.shape
    low_nibbles = weight & 0xF
    high_nibbles = (weight >> 4) & 0xF
    return torch.stack((low_nibbles, high_nibbles), dim=2).view(rows, 2 * cols)
24
+
25
+
26
+ # Transpose the weight matrix, and then reorder its elements according
27
+ # to underlying requirements of CUTLASS library, so that it could be
28
+ # used for CUTLASS-based mixed datatypes linear operation.
29
def quantized_weight_reorder_for_mixed_dtypes_linear_cutlass(
    weight, dtypeq, transpose=False
):
    """Reorder a quantized weight matrix into the layout CUTLASS mixed-dtype
    GEMM expects, returning the result as a ``torch.uint8`` tensor.

    The pipeline mirrors the CUTLASS host-side preprocessing steps, in order:
    transpose (unless already transposed), column permutation, 32-bit
    interleaving, and bias-add plus nibble/byte interleaving.

    Args:
        weight: 2-D ``torch.int8`` CUDA tensor holding the quantized weights.
            For ``dtypeq == torch.quint4x2`` the int8 elements carry packed
            int4 pairs.
        dtypeq: quantized element type, ``torch.int8`` or ``torch.quint4x2``.
        transpose (bool): if True, `weight` is taken to already be in the
            transposed orientation and the transpose step is skipped.

    Returns:
        ``torch.uint8`` tensor of shape ``(nrows, ncols)`` in CUTLASS layout
        (for quint4x2 the logical row count doubles and column count halves
        at the end, as two int4 values occupy each byte).
    """
    assert weight.dim() == 2
    assert weight.dtype == torch.int8
    assert dtypeq == torch.int8 or dtypeq == torch.quint4x2
    assert weight.device.type == "cuda"

    device = weight.device

    # subbyte_transpose
    # For packed int4 data a plain tensor transpose would scramble nibble
    # pairs, so unpack -> transpose -> repack instead.
    if not transpose:
        if dtypeq == torch.int8:
            outp = weight.T
        elif dtypeq == torch.quint4x2:
            outp = pack_int4_to_int8(unpack_int8_to_int4(weight.view(torch.int8)).T)
    else:
        outp = weight

    ncols, nrows = outp.shape  # type: ignore[possibly-undefined]
    # Tile-size divisibility requirements of the CUTLASS kernel layout.
    assert nrows % (32 if dtypeq == torch.quint4x2 else 64) == 0
    assert ncols % 64 == 0

    # permute_B_rows_for_mixed_gemm
    # (permute cols actually, as transpose is applied first here)
    # Each group of 16 columns is shuffled by a fixed pattern that depends
    # on the element width (int4 vs int8).
    if dtypeq == torch.quint4x2:
        cols_permuted = (
            torch.tensor(
                [0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15],
                device=device,
            )
            + (torch.arange(0, nrows // 16, device=device).reshape(-1, 1) * 16).expand(
                nrows // 16, 16
            )
        ).view(-1)
    else:
        cols_permuted = (
            torch.tensor(
                [0, 1, 4, 5, 8, 9, 12, 13, 2, 3, 6, 7, 10, 11, 14, 15],
                device=device,
            )
            + (torch.arange(0, nrows // 16, device=device).reshape(-1, 1) * 16).expand(
                nrows // 16, 16
            )
        ).view(-1)
    outp = outp.index_copy(1, cols_permuted, outp)

    # interleave_column_major_tensor
    # Scatter 32-bit words into an interleaved column-major order; magic0/magic1
    # describe the interleaving granularity for the element width.
    magic0 = 4 if dtypeq == torch.quint4x2 else 2
    magic1 = 32 // magic0

    # tmp0..tmp3 together enumerate, per 32-bit word, its destination offset.
    tmp0 = (
        (torch.arange(0, ncols // magic0, device=device) * (nrows // 4 * magic0))
        .view(-1, 1)
        .repeat(1, nrows // 4 * magic0)
        .view(-1)
    )
    tmp1 = (
        (torch.arange(0, nrows // 4 // magic1, device=device) * (magic0 * magic1))
        .view(-1, 1)
        .repeat(1, magic1)
        .view(-1)
        .repeat(ncols)
    )
    tmp2 = (
        (torch.arange(0, magic0, device=device) * magic1)
        .view(-1, 1)
        .repeat(1, nrows // 4)
        .view(-1)
        .repeat(ncols // magic0)
    )
    tmp3 = torch.arange(0, magic1, device=device).repeat(nrows // 4 * ncols // magic1)

    outp_offsets = tmp0 + tmp1 + tmp2 + tmp3

    # Reinterpret the bytes as int32 words and scatter them to their slots.
    tmp = outp.view(-1).view(torch.int32)
    outp = torch.zeros_like(tmp)
    outp.scatter_(0, outp_offsets, tmp)
    outp = outp.view(weight.dtype)

    # add_bias_and_interleave_quantized_tensor_inplace
    tmp = outp.view(-1)

    outp = torch.empty_like(tmp)
    if dtypeq == torch.int8:
        # Shift signed int8 into unsigned range (+128), then swap the middle
        # pair of every 4 bytes.
        tmp = (tmp.to(torch.int) + 128).to(tmp.dtype)
        outp[0::4] = tmp[0::4]
        outp[1::4] = tmp[2::4]
        outp[2::4] = tmp[1::4]
        outp[3::4] = tmp[3::4]
    elif dtypeq == torch.quint4x2:
        # Shift each signed int4 nibble into unsigned range (+8 mod 16),
        # repack nibbles, then interleave low-nibble and high-nibble bytes.
        tmp0 = ((tmp & 0xF) + 8) & 0xF
        tmp0 = (tmp0[1::2] << 4) | tmp0[0::2]
        tmp1 = (((tmp >> 4) & 0xF) + 8) & 0xF
        tmp1 = (tmp1[1::2] << 4) | tmp1[0::2]
        outp[0::4] = tmp0[0::2]
        outp[1::4] = tmp0[1::2]
        outp[2::4] = tmp1[0::2]
        outp[3::4] = tmp1[1::2]

    # Two int4 values per byte: logical rows double, columns halve.
    if dtypeq == torch.quint4x2:
        nrows *= 2
        ncols //= 2

    return outp.view(nrows, ncols).view(torch.uint8)
infer_4_47_1/lib/python3.10/site-packages/torch/quantization/fuser_method_mappings.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # flake8: noqa: F401
2
+ r"""
3
+ This file is in the process of migration to `torch/ao/quantization`, and
4
+ is kept here for compatibility while the migration process is ongoing.
5
+ If you are adding a new entry/functionality, please, add it to the
6
+ `torch/ao/quantization/fuser_method_mappings.py`, while adding an import statement
7
+ here.
8
+ """
9
+ from torch.ao.quantization.fuser_method_mappings import (
10
+ _DEFAULT_OP_LIST_TO_FUSER_METHOD,
11
+ fuse_conv_bn,
12
+ fuse_conv_bn_relu,
13
+ fuse_linear_bn,
14
+ get_fuser_method,
15
+ )
infer_4_47_1/lib/python3.10/site-packages/torch/quantization/quantization_mappings.py ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # flake8: noqa: F401
2
+ r"""
3
+ This file is in the process of migration to `torch/ao/quantization`, and
4
+ is kept here for compatibility while the migration process is ongoing.
5
+ If you are adding a new entry/functionality, please, add it to the
6
+ `torch/ao/quantization/quantization_mappings.py`, while adding an import statement
7
+ here.
8
+ """
9
+ from torch.ao.quantization.quantization_mappings import (
10
+ _get_special_act_post_process,
11
+ _has_special_act_post_process,
12
+ _INCLUDE_QCONFIG_PROPAGATE_LIST,
13
+ DEFAULT_DYNAMIC_QUANT_MODULE_MAPPINGS,
14
+ DEFAULT_FLOAT_TO_QUANTIZED_OPERATOR_MAPPINGS,
15
+ DEFAULT_MODULE_TO_ACT_POST_PROCESS,
16
+ DEFAULT_QAT_MODULE_MAPPINGS,
17
+ DEFAULT_REFERENCE_STATIC_QUANT_MODULE_MAPPINGS,
18
+ DEFAULT_STATIC_QUANT_MODULE_MAPPINGS,
19
+ get_default_compare_output_module_list,
20
+ get_default_dynamic_quant_module_mappings,
21
+ get_default_float_to_quantized_operator_mappings,
22
+ get_default_qat_module_mappings,
23
+ get_default_qconfig_propagation_list,
24
+ get_default_static_quant_module_mappings,
25
+ get_dynamic_quant_module_class,
26
+ get_quantized_operator,
27
+ get_static_quant_module_class,
28
+ no_observer_set,
29
+ )
infer_4_47_1/lib/python3.10/site-packages/torch/quantization/utils.py ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # flake8: noqa: F401
2
+ r"""
3
+ Utils shared by different modes of quantization (eager/graph)
4
+
5
+ This file is in the process of migration to `torch/ao/quantization`, and
6
+ is kept here for compatibility while the migration process is ongoing.
7
+ If you are adding a new entry/functionality, please, add it to the
8
+ `torch/ao/quantization/utils.py`, while adding an import statement
9
+ here.
10
+ """
11
+
12
+ from torch.ao.quantization.utils import (
13
+ activation_dtype,
14
+ activation_is_int8_quantized,
15
+ activation_is_statically_quantized,
16
+ calculate_qmin_qmax,
17
+ check_min_max_valid,
18
+ get_combined_dict,
19
+ get_qconfig_dtypes,
20
+ get_qparam_dict,
21
+ get_quant_type,
22
+ get_swapped_custom_module_class,
23
+ getattr_from_fqn,
24
+ is_per_channel,
25
+ is_per_tensor,
26
+ weight_dtype,
27
+ weight_is_quantized,
28
+ weight_is_statically_quantized,
29
+ )
infer_4_47_1/lib/python3.10/site-packages/torch/testing/__init__.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ from torch._C import FileCheck as FileCheck
2
+
3
+ from . import _utils
4
+ from ._comparison import assert_allclose, assert_close as assert_close
5
+ from ._creation import make_tensor as make_tensor
infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/__init__.py ADDED
File without changes
infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/autocast_test_lists.py ADDED
@@ -0,0 +1,474 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: ignore-errors
2
+
3
+ import collections
4
+
5
+ import torch
6
+ from torch.testing._internal.common_utils import TEST_WITH_ROCM
7
+ from torch.testing._internal.common_utils import TestCase
8
+
9
+
10
+ class AutocastTestLists:
11
+ def _rnn_cell_args(self, n, num_chunks, is_lstm, dev, dtype):
12
+ input = (torch.randn((n, n), device=dev, dtype=torch.float32),)
13
+
14
+ hx = ((torch.randn((n, n), device=dev, dtype=torch.float32),
15
+ torch.randn((n, n), device=dev, dtype=torch.float32)) if is_lstm else
16
+ torch.randn((n, n), device=dev, dtype=torch.float32),)
17
+
18
+ weights = (torch.randn((num_chunks * n, n), device=dev, dtype=torch.float32), # weight_ih
19
+ torch.randn((num_chunks * n, n), device=dev, dtype=torch.float32), # weight_hh
20
+ torch.randn((num_chunks * n), device=dev, dtype=torch.float32), # bias_ih
21
+ torch.randn((num_chunks * n), device=dev, dtype=torch.float32)) # bias_hh
22
+
23
+ # returns args as a tuple
24
+ return input + hx + weights
25
+
26
+ # Supplies ops and arguments for test_autocast_* in test/test_cuda.py
27
+ def __init__(self, dev):
28
+ super().__init__()
29
+ n = 8
30
+ # Utility arguments, created as one-element tuples
31
+ pointwise0_fp16 = (torch.randn(n, dtype=torch.float16, device=dev),)
32
+ pointwise1_fp16 = (torch.randn(n, dtype=torch.float16, device=dev),)
33
+ pointwise2_fp16 = (torch.randn(n, dtype=torch.float16, device=dev),)
34
+ mat0_fp16 = (torch.randn((n, n), dtype=torch.float16, device=dev),)
35
+ mat1_fp16 = (torch.randn((n, n), dtype=torch.float16, device=dev),)
36
+ mat2_fp16 = (torch.randn((n, n), dtype=torch.float16, device=dev),)
37
+
38
+ dimsets = ((n, n, n), (n, n, n, n), (n, n, n, n, n))
39
+ conv_args_fp32 = [(torch.randn(dimset, dtype=torch.float32, device=dev),
40
+ torch.randn(dimset, dtype=torch.float32, device=dev))
41
+ for dimset in dimsets]
42
+ bias_fp32 = (torch.randn((n,), dtype=torch.float32, device=dev),)
43
+ element0_fp32 = (torch.randn(1, dtype=torch.float32, device=dev),)
44
+ pointwise0_fp32 = (torch.randn(n, dtype=torch.float32, device=dev),)
45
+ pointwise1_fp32 = (torch.randn(n, dtype=torch.float32, device=dev),)
46
+ mat0_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),)
47
+ mat1_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),)
48
+ mat2_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),)
49
+ mat3_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),)
50
+
51
+ # The lists below organize ops that autocast needs to test.
52
+ # self.list_name corresponds to test_autocast_list_name in test/test_cuda.py.
53
+ # Each op is associated with a tuple of valid arguments.
54
+ # In addition, cudnn conv ops are not supported on ROCm and hence will
55
+ # be skipped by passing TEST_WITH_ROCM flag to those ops in self.torch_fp16 list.
56
+
57
+ # Some ops implement built-in type promotion. These don't need autocasting,
58
+ # but autocasting relies on their promotion, so we include tests to double-check.
59
+ self.torch_expect_builtin_promote = [
60
+ ("eq", pointwise0_fp32 + pointwise1_fp16, torch.bool),
61
+ ("ge", pointwise0_fp32 + pointwise1_fp16, torch.bool),
62
+ ("gt", pointwise0_fp32 + pointwise1_fp16, torch.bool),
63
+ ("le", pointwise0_fp32 + pointwise1_fp16, torch.bool),
64
+ ("lt", pointwise0_fp32 + pointwise1_fp16, torch.bool),
65
+ ("ne", pointwise0_fp32 + pointwise1_fp16, torch.bool),
66
+ ("add", pointwise0_fp32 + pointwise1_fp16, torch.float32),
67
+ ("div", pointwise0_fp32 + pointwise1_fp16, torch.float32),
68
+ ("mul", pointwise0_fp32 + pointwise1_fp16, torch.float32),
69
+ ("cat", (pointwise0_fp16 + pointwise1_fp32,), torch.float32),
70
+ ("equal", pointwise0_fp32 + pointwise1_fp16, torch.float32),
71
+ ("stack", (pointwise0_fp16 + pointwise1_fp32,), torch.float32),
72
+ ]
73
+ self.methods_expect_builtin_promote = [
74
+ ("__eq__", pointwise0_fp32 + pointwise1_fp16, torch.bool),
75
+ ("__ge__", pointwise0_fp32 + pointwise1_fp16, torch.bool),
76
+ ("__gt__", pointwise0_fp32 + pointwise1_fp16, torch.bool),
77
+ ("__le__", pointwise0_fp32 + pointwise1_fp16, torch.bool),
78
+ ("__lt__", pointwise0_fp32 + pointwise1_fp16, torch.bool),
79
+ ("__ne__", pointwise0_fp32 + pointwise1_fp16, torch.bool),
80
+ ("__add__", pointwise0_fp32 + pointwise1_fp16, torch.float32),
81
+ ("__div__", pointwise0_fp32 + pointwise1_fp16, torch.float32),
82
+ ("__mul__", pointwise0_fp32 + pointwise1_fp16, torch.float32),
83
+ ]
84
+
85
+ # The remaining lists organize ops that autocast treats explicitly.
86
+ self.torch_fp16 = [
87
+ # deprecated _convolution
88
+ ("_convolution", conv_args_fp32[1] + bias_fp32 + ((1, 1), (0, 0), (1, 1), False,
89
+ (0, 0), 1, False, True, True)),
90
+ # the current _convolution
91
+ ("_convolution", conv_args_fp32[1] + bias_fp32 + ((1, 1), (0, 0), (1, 1), False,
92
+ (0, 0), 1, False, True, True, True)),
93
+ ("conv1d", conv_args_fp32[0]),
94
+ ("conv2d", conv_args_fp32[1]),
95
+ ("conv3d", conv_args_fp32[2]),
96
+ ("conv_tbc", conv_args_fp32[0] + bias_fp32),
97
+ ("conv_transpose1d", conv_args_fp32[0]),
98
+ ("conv_transpose2d", conv_args_fp32[1]),
99
+ ("conv_transpose3d", conv_args_fp32[2]),
100
+ ("convolution", conv_args_fp32[1] + bias_fp32 + ((1, 1), (0, 0), (1, 1), False, (0, 0), 1)),
101
+ ("cudnn_convolution", conv_args_fp32[1] + ((0, 0), (1, 1), (1, 1), 1, False, True, True), TEST_WITH_ROCM),
102
+ ("cudnn_convolution_transpose", conv_args_fp32[1] + ((0, 0), (0, 0), (1, 1),
103
+ (1, 1), 1, False, True, True), TEST_WITH_ROCM),
104
+ ("prelu", pointwise0_fp32 + element0_fp32),
105
+ ("addmm", mat1_fp32 + mat2_fp32 + mat3_fp32),
106
+ ("addmv", pointwise0_fp32 + mat2_fp32 + pointwise1_fp32),
107
+ ("addr", mat0_fp32 + pointwise0_fp32 + pointwise1_fp32),
108
+ ("matmul", mat0_fp32 + mat1_fp32),
109
+ ("einsum", "bkhd,bqhd->bqkh", mat0_fp32 + mat1_fp32),
110
+ ("mm", mat0_fp32 + mat1_fp32),
111
+ ("mv", mat0_fp32 + pointwise0_fp32),
112
+ ("chain_matmul", mat0_fp32 + mat1_fp32 + mat2_fp32),
113
+ ("addbmm", mat0_fp32 + (torch.randn((n, n, n), device=dev, dtype=torch.float32),
114
+ torch.randn((n, n, n), device=dev, dtype=torch.float32))),
115
+ ("baddbmm", (torch.randn((n, n, n), device=dev, dtype=torch.float32),
116
+ torch.randn((n, n, n), device=dev, dtype=torch.float32),
117
+ torch.randn((n, n, n), device=dev, dtype=torch.float32))),
118
+ ("bmm", (torch.randn((n, n, n), device=dev, dtype=torch.float32),
119
+ torch.randn((n, n, n), device=dev, dtype=torch.float32))),
120
+ # _thnn_fused_lstm_cell and _thnn_fused_gru_cell are not Python-exposed as far as I can tell.
121
+ # ("_thnn_fused_lstm_cell", mat0_fp32 + mat1_fp32 + mat2_fp32 + pointwise0_fp32 + pointwise1_fp32),
122
+ # ("_thnn_fused_gru_cell", mat0_fp32 + mat1_fp32 + mat2_fp32 + pointwise0_fp32 + pointwise1_fp32),
123
+ ("lstm_cell", self._rnn_cell_args(n, num_chunks=4, is_lstm=True, dev=dev, dtype=torch.float32)),
124
+ ("gru_cell", self._rnn_cell_args(n, num_chunks=3, is_lstm=False, dev=dev, dtype=torch.float32)),
125
+ ("rnn_tanh_cell", self._rnn_cell_args(n, num_chunks=1, is_lstm=False, dev=dev, dtype=torch.float32)),
126
+ ("rnn_relu_cell", self._rnn_cell_args(n, num_chunks=1, is_lstm=False, dev=dev, dtype=torch.float32)),
127
+ ]
128
+ self.torch_fp32 = [
129
+ ("acos", (pointwise0_fp16[0].clamp(-.9, 0.9),)),
130
+ ("asin", (pointwise0_fp16[0].clamp(-.9, 0.9),)),
131
+ ("cosh", pointwise0_fp16),
132
+ ("erfinv", (pointwise0_fp16[0].clamp(-.9, .9),)),
133
+ ("exp", pointwise0_fp16),
134
+ ("expm1", pointwise0_fp16),
135
+ ("log", (pointwise0_fp16[0].clamp(0.1, 100.0),)),
136
+ ("log10", (pointwise0_fp16[0].clamp(0.1, 100.0),)),
137
+ ("log2", (pointwise0_fp16[0].clamp(0.1, 100.0),)),
138
+ ("log1p", (pointwise0_fp16[0].clamp(-0.9, 100.0),)),
139
+ ("reciprocal", pointwise0_fp16),
140
+ ("rsqrt", (pointwise0_fp16[0].clamp(0.0, 100.0),)),
141
+ ("sinh", pointwise0_fp16),
142
+ ("tan", (pointwise0_fp16[0].clamp(-3.1 / 2, 3.1 / 2),)),
143
+ ("pow", ((pointwise0_fp16[0] + 1.).clamp(0.0, 100.0),) + pointwise1_fp16),
144
+ ("pow", ((pointwise0_fp16[0] + 1.).clamp(0.0, 100.0),) + (1.7,)),
145
+ # ("pow", (1.7,) + pointwise0_fp16), # This variant has a backend, but is not documented in the API.
146
+ ("softmax", pointwise0_fp16 + (0,)),
147
+ ("log_softmax", pointwise0_fp16 + (0,)),
148
+ ("layer_norm", pointwise0_fp16 + ((pointwise0_fp16[0].numel(),),)),
149
+ ("group_norm", mat0_fp16 + (1,)),
150
+ ("norm", pointwise0_fp16),
151
+ ("norm", pointwise0_fp16, {"dim": 0}),
152
+ # these need magma
153
+ # ("norm", mat0_fp16, {"p": "nuc"}),
154
+ # ("norm", mat0_fp16, {"p": "nuc", "dim": 0}),
155
+ ("norm", pointwise0_fp16, {"p": 1}),
156
+ ("norm", pointwise0_fp16, {"p": 1, "dim": 0}),
157
+ ("cosine_similarity", mat0_fp16 + mat1_fp16),
158
+ ("poisson_nll_loss", mat0_fp16 + mat1_fp16 + (True, False, 1.e-8, torch.nn._reduction.get_enum('mean'))),
159
+ ("cosine_embedding_loss", (torch.tensor([[1, 2, 3]], device=dev, dtype=torch.float16),
160
+ torch.tensor([[1, 3, 4]], device=dev, dtype=torch.float16),
161
+ torch.tensor([1], device=dev, dtype=torch.int))),
162
+ ("hinge_embedding_loss", mat0_fp16 + (torch.ones(n, device=dev, dtype=torch.int),)),
163
+ ("kl_div", mat0_fp16 + (torch.rand((n, n), device=dev, dtype=torch.float16),)),
164
+ ("margin_ranking_loss", mat0_fp16 + mat1_fp16 + (torch.ones((n,), device=dev, dtype=torch.float16),)),
165
+ ("triplet_margin_loss", mat0_fp16 + mat1_fp16 + mat2_fp16),
166
+ ("binary_cross_entropy_with_logits", mat0_fp16 + (torch.rand((n, n), device=dev, dtype=torch.float16),)),
167
+ ("cumprod", pointwise0_fp16 + (0,)),
168
+ ("cumsum", pointwise0_fp16 + (0,)),
169
+ ("dist", pointwise0_fp16 + pointwise1_fp16),
170
+ ("pdist", mat0_fp16),
171
+ ("cdist", mat0_fp16 + mat1_fp16),
172
+ ("prod", pointwise0_fp16),
173
+ ("prod", pointwise0_fp16 + (0,)),
174
+ ("renorm", mat0_fp16 + (2, 0, 1.0)),
175
+ ("sum", pointwise0_fp16),
176
+ ("sum", mat0_fp16 + (1,)),
177
+ ("logsumexp", mat0_fp16 + (1,)),
178
+ ]
179
+ self.torch_need_autocast_promote = [
180
+ ("addcdiv", pointwise0_fp32 + pointwise1_fp16 + (pointwise2_fp16[0].clamp(0.1, 100),)),
181
+ ("addcmul", pointwise0_fp32 + pointwise1_fp16 + pointwise2_fp16),
182
+ ("atan2", pointwise0_fp32 + (pointwise1_fp16[0].clamp(0.1, 100),)),
183
+ ("bilinear", (torch.randn((1, 2), dtype=torch.float16, device=dev),
184
+ torch.randn((1, 2), dtype=torch.float32, device=dev),
185
+ torch.randn((1, 2, 2), dtype=torch.float16, device=dev),
186
+ torch.randn((1,), dtype=torch.float32, device=dev))),
187
+ ("cross", (torch.randn(3, dtype=torch.float32, device=dev),
188
+ torch.randn(3, dtype=torch.float16, device=dev))),
189
+ ("dot", pointwise0_fp16 + pointwise1_fp32),
190
+ ("vdot", pointwise0_fp16 + pointwise1_fp32),
191
+ ("grid_sampler", (torch.randn((2, 3, 33, 22), dtype=torch.float16, device=dev),
192
+ torch.randn((2, 22, 11, 2), dtype=torch.float32, device=dev),
193
+ 0, 0, False)),
194
+ ("index_put", pointwise0_fp32 + ((torch.tensor([1], device=dev, dtype=torch.long),),
195
+ torch.randn(1, device=dev, dtype=torch.float16))),
196
+ ("index_put", pointwise0_fp16 + ((torch.tensor([1], device=dev, dtype=torch.long),),
197
+ torch.randn(1, device=dev, dtype=torch.float32))),
198
+ ("tensordot", (torch.randn((2, 2, 2), dtype=torch.float32, device=dev),
199
+ torch.randn((2, 2, 2), dtype=torch.float16, device=dev))),
200
+ ("scatter_add", (torch.zeros(2, 2, 2, dtype=torch.float32, device=dev),
201
+ 0,
202
+ torch.randint(0, 2, (2, 2, 2), device=dev),
203
+ torch.randn((2, 2, 2), dtype=torch.float16, device=dev))),
204
+ ("scatter_add", (torch.zeros(2, 2, 2, dtype=torch.float16, device=dev),
205
+ 0,
206
+ torch.randint(0, 2, (2, 2, 2), device=dev),
207
+ torch.randn((2, 2, 2), dtype=torch.float32, device=dev))),
208
+ ]
209
+ self.nn_fp16 = [
210
+ ("linear", mat0_fp32 + mat1_fp32 + mat2_fp32),
211
+ ]
212
+ self.nn_fp32 = [
213
+ ("softplus", pointwise0_fp16),
214
+ ("nll_loss", (torch.rand((n, n), device=dev, dtype=torch.float),
215
+ torch.zeros((n,), device=dev, dtype=torch.long))),
216
+ ("nll_loss2d", (torch.rand((n, n, n, n), device=dev, dtype=torch.half),
217
+ torch.zeros((n, n, n), device=dev, dtype=torch.long))),
218
+ ("l1_loss", mat0_fp16 + mat1_fp16),
219
+ ("smooth_l1_loss", mat0_fp16 + mat1_fp16),
220
+ ("mse_loss", mat0_fp16 + mat1_fp16),
221
+ ("multilabel_margin_loss", mat0_fp16 + (torch.ones((n, n), device=dev, dtype=torch.long),)),
222
+ ("soft_margin_loss", mat0_fp16 + (torch.ones((n, n), device=dev, dtype=torch.long),)),
223
+ ("multi_margin_loss", mat0_fp16 + (torch.ones((n,), device=dev, dtype=torch.long),)),
224
+ ]
225
+ self.linalg_fp16 = [
226
+ ("linalg_vecdot", mat0_fp32 + mat0_fp32),
227
+ ("linalg_multi_dot", (mat0_fp32 + mat1_fp32 + mat2_fp32,)),
228
+ ]
229
+ self.methods_fp16 = [
230
+ ("__matmul__", mat0_fp32 + mat1_fp32)
231
+ ]
232
+ self.methods_fp32 = [
233
+ ("__pow__", (torch.rand(n, device=dev, dtype=torch.float16), 1.5)),
234
+ ]
235
+ self.banned = [
236
+ ("binary_cross_entropy", (torch.rand((n, n), device=dev, dtype=torch.float32),
237
+ torch.rand((n, n), device=dev, dtype=torch.float32)), torch._C._nn),
238
+ ]
239
+
240
+
241
class AutocastCPUTestLists:
    # Supplies ops and arguments for test_autocast_* in test/test_cpu.py
    def __init__(self, dev):
        super().__init__()
        n = 8
        # Utility arguments, created as one-element tuples
        pointwise0_bf16 = (torch.randn(n, dtype=torch.bfloat16, device=dev),)
        pointwise1_bf16 = (torch.randn(n, dtype=torch.bfloat16, device=dev),)
        pointwise2_bf16 = (torch.randn(n, dtype=torch.bfloat16, device=dev),)
        mat0_bf16 = (torch.randn((n, n), dtype=torch.bfloat16, device=dev),)
        mat1_bf16 = (torch.randn((n, n), dtype=torch.bfloat16, device=dev),)
        mat2_bf16 = (torch.randn((n, n), dtype=torch.bfloat16, device=dev),)

        pointwise0_fp16 = (torch.randn(n, dtype=torch.float16, device=dev),)
        pointwise1_fp16 = (torch.randn(n, dtype=torch.float16, device=dev),)

        # Shapes used for the rank-1 through rank-5 "dummy" tensors below.
        dummy_dimsets = ((n,), (n, n), (n, n, n), (n, n, n, n), (n, n, n, n, n))

        dummy_bf16 = [(torch.randn(dimset, dtype=torch.bfloat16, device=dev),)
                      for dimset in dummy_dimsets]

        # (input, weight) pairs for conv1d/2d/3d respectively.
        dimsets = ((n, n, n), (n, n, n, n), (n, n, n, n, n))
        conv_args_bf16 = [(torch.randn(dimset, dtype=torch.bfloat16, device=dev),
                           torch.randn(dimset, dtype=torch.bfloat16, device=dev))
                          for dimset in dimsets]
        conv_args_fp32 = [(torch.randn(dimset, dtype=torch.float32, device=dev),
                           torch.randn(dimset, dtype=torch.float32, device=dev))
                          for dimset in dimsets]

        bias_fp32 = (torch.randn((n,), dtype=torch.float32, device=dev),)
        element0_fp32 = (torch.randn(1, dtype=torch.float32, device=dev),)
        pointwise0_fp32 = (torch.randn(n, dtype=torch.float32, device=dev),)
        pointwise1_fp32 = (torch.randn(n, dtype=torch.float32, device=dev),)
        mat0_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),)
        mat1_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),)
        mat2_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),)
        mat3_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),)

        dummy_fp32 = [(torch.randn(dimset, dtype=torch.float32, device=dev),)
                      for dimset in dummy_dimsets]
        # NOTE(review): conv_args_bf16 and dummy_fp32 appear unused in this
        # class as shown here -- confirm against the full file before removal.
        # The lists below organize ops that autocast needs to test.
        # self.list_name corresponds to test_autocast_list_name in test/test_cpu.py.
        # Each op is associated with a tuple of valid arguments.

        # Some ops implement built-in type promotion. These don't need autocasting,
        # but autocasting relies on their promotion, so we include tests to double-check.
        # CPU entries carry two argument tuples (bf16 variant, fp16 variant)
        # followed by the expected output dtype.
        self.torch_expect_builtin_promote = [
            ("eq", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool),
            ("ge", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool),
            ("gt", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool),
            ("le", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool),
            ("lt", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool),
            ("ne", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool),
            ("add", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.float32),
            ("div", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.float32),
            ("mul", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.float32),
        ]

        self.methods_expect_builtin_promote = [
            ("__eq__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool),
            ("__ge__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool),
            ("__gt__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool),
            ("__le__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool),
            ("__lt__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool),
            ("__ne__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool),
            ("__add__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.float32),
            ("__div__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.float32),
            ("__mul__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.float32),
        ]
        # The remaining lists organize ops that autocast treats explicitly.
        # Ops expected to run in the lower (bf16/fp16) autocast dtype:
        self.torch_16 = [
            ("conv1d", conv_args_fp32[0]),
            ("conv2d", conv_args_fp32[1]),
            ("conv3d", conv_args_fp32[2]),
            ("bmm", (torch.randn((n, n, n), device=dev, dtype=torch.float32),
                     torch.randn((n, n, n), device=dev, dtype=torch.float32))),
            ("mm", mat0_fp32 + mat1_fp32),
            ("matmul", mat0_fp32 + mat1_fp32),
            ("baddbmm", (torch.randn((n, n, n), device=dev, dtype=torch.float32),
                         torch.randn((n, n, n), device=dev, dtype=torch.float32),
                         torch.randn((n, n, n), device=dev, dtype=torch.float32))),
            ("addmm", mat1_fp32 + mat2_fp32 + mat3_fp32),
            ("addbmm", mat0_fp32 + (torch.randn((n, n, n), device=dev, dtype=torch.float32),
                                    torch.randn((n, n, n), device=dev, dtype=torch.float32))),
            ("conv_tbc", (torch.randn((10, 7, 3), device=dev, dtype=torch.float32),
                          torch.randn((5, 3, 5), device=dev, dtype=torch.float32),
                          torch.randn(5, device=dev, dtype=torch.float32),
                          0)),
            ("conv_transpose1d", conv_args_fp32[0]),
            ("conv_transpose2d", conv_args_fp32[1]),
            ("conv_transpose3d", conv_args_fp32[2]),
            ("prelu", pointwise0_fp32 + element0_fp32),
            ("_native_multi_head_attention", (torch.randn((n, n, n), device=dev, dtype=torch.float32),
                                              torch.randn((n, n, n), device=dev, dtype=torch.float32),
                                              torch.randn((n, n, n), device=dev, dtype=torch.float32),
                                              n, 4, torch.randn((3 * n, n), device=dev, dtype=torch.float32),
                                              torch.randn((3 * n), device=dev, dtype=torch.float32),
                                              torch.randn((n, n), device=dev, dtype=torch.float32),
                                              torch.randn((n), device=dev, dtype=torch.float32))),
        ]
        # Ops expected to be promoted back to fp32 under autocast:
        self.torch_fp32 = [
            ("poisson_nll_loss", mat0_bf16 + mat1_bf16 + (True, False, 1.e-8, torch.nn._reduction.get_enum('mean'))),
            ("cosine_embedding_loss", (torch.tensor([[1, 2, 3]], device=dev, dtype=torch.bfloat16),
                                       torch.tensor([[1, 3, 4]], device=dev, dtype=torch.bfloat16),
                                       torch.tensor([1], device=dev, dtype=torch.int))),
            ("hinge_embedding_loss", mat0_bf16 + (torch.ones(n, device=dev, dtype=torch.int),)),
            ("margin_ranking_loss", mat0_bf16 + mat1_bf16 + (torch.ones((n,), device=dev, dtype=torch.bfloat16),)),
            ("triplet_margin_loss", mat0_bf16 + mat1_bf16 + mat2_bf16),
            ("binary_cross_entropy_with_logits", mat0_bf16 + (torch.rand((n, n), device=dev, dtype=torch.bfloat16),)),
        ]
        self.nn_16 = [
            ("linear", mat0_fp32 + mat1_fp32, {}),
        ]
        self.nn_fp32 = [
            ("avg_pool3d", dummy_bf16[3], {"kernel_size": (3, 3, 3), "stride": (1, 1, 1)}),
            ("binary_cross_entropy", (torch.rand((n, n), device=dev, dtype=torch.bfloat16),) +
                                     (torch.rand((n, n), device=dev, dtype=torch.bfloat16),)),
            ("reflection_pad1d", dummy_bf16[2], {"padding": (3, 3)}),
            ("nll_loss", (torch.rand((n, n), device=dev, dtype=torch.bfloat16),
                          torch.zeros((n,), device=dev, dtype=torch.long))),
            ("nll_loss2d", (torch.rand((n, n, n, n), device=dev, dtype=torch.bfloat16),
                            torch.zeros((n, n, n), device=dev, dtype=torch.long))),
            ("l1_loss", mat0_bf16 + mat1_bf16),
            ("smooth_l1_loss", mat0_bf16 + mat1_bf16),
            ("mse_loss", mat0_bf16 + mat1_bf16),
            ("multilabel_margin_loss", mat0_bf16 + (torch.ones((n, n), device=dev, dtype=torch.long),)),
            ("soft_margin_loss", mat0_bf16 + (torch.ones((n, n), device=dev, dtype=torch.long),)),
            ("multi_margin_loss", mat0_bf16 + (torch.ones((n,), device=dev, dtype=torch.long),)),
            ("huber_loss", mat0_bf16 + mat1_bf16),
        ]
        # Ops whose mixed-dtype inputs autocast must promote to a common dtype
        # (again one bf16-variant tuple and one fp16-variant tuple per entry).
        self.torch_need_autocast_promote = [
            ("cat", (pointwise0_bf16 + pointwise1_fp32,), (pointwise0_fp16 + pointwise1_fp32,)),
            ("stack", (pointwise0_bf16 + pointwise1_fp32,), (pointwise0_fp16 + pointwise1_fp32,)),
        ]
375
+
376
+
377
class TestAutocast(TestCase):
    def args_maybe_kwargs(self, op_with_args):
        """Normalize an op entry to (op_name, args, kwargs).

        Entries come as (op, args) or (op, args, kwargs); a missing kwargs
        dict defaults to {}.
        """
        if len(op_with_args) == 2:
            return op_with_args[0], op_with_args[1], {}
        else:
            return op_with_args[0], op_with_args[1], op_with_args[2]

    def _run_autocast_outofplace(
        self,
        op,
        args,
        run_as_type,
        device,
        out_type=None,
        module=torch,
        add_kwargs=None,
        amp_dtype=torch.bfloat16,
    ):
        """Run an out-of-place op under autocast and check dtype and numerics.

        The op is invoked both as ``module.op(*args)`` (if ``module`` has it)
        and as a ``Tensor.op`` method (if it exists). Every Tensor result must
        have dtype ``out_type`` (defaults to ``run_as_type``). The autocast
        results are then compared bitwise against a control run with autocast
        disabled and the arguments manually cast to ``run_as_type``.
        """
        # helper to cast args
        def cast(val, to_type):
            # Recursively cast floating-point tensors inside (possibly nested)
            # iterables; integer tensors and non-tensor values pass through.
            if isinstance(val, torch.Tensor):
                return val.to(to_type) if val.is_floating_point() else val
            elif isinstance(val, collections.abc.Iterable):
                return type(val)(cast(v, to_type) for v in val)
            else:
                return val

        if add_kwargs is None:
            add_kwargs = {}

        # Autocast must be off outside the context manager...
        self.assertFalse(torch.is_autocast_enabled(device_type=device))
        with torch.amp.autocast(device_type=device, dtype=amp_dtype):
            # ...and on inside it.
            self.assertTrue(torch.is_autocast_enabled(device_type=device))

            out_type = out_type if out_type is not None else run_as_type
            output = output_method = None

            # Try module.* variant, if requested:
            if module is not None and hasattr(module, op):
                output = getattr(module, op)(*args, **add_kwargs)
                if isinstance(output, torch.Tensor):
                    self.assertTrue(
                        out_type == output.dtype,
                        f"autocast for torch.{op} produced {output.dtype}, should produce {out_type}",
                    )
            # Try Tensor.* variant:
            if hasattr(torch.Tensor, op):
                output_method = getattr(args[0], op)(*args[1:], **add_kwargs)
                if isinstance(output_method, torch.Tensor):
                    self.assertTrue(
                        out_type == output_method.dtype,
                        f"autocast for torch.{op} produced {output_method.dtype}, should produce torch.{out_type}",
                    )

            self.assertTrue(
                (output is not None) or (output_method is not None),
                f"{op} not found as an attribute on either Tensor or the requested module {module}",
            )

            # Accounts for ops that return Tensors, iterables, and other non-Tensors.
            # For example, lstm_cell returns a tuple and equal returns bool.
            def compare(first, second):
                if isinstance(first, torch.Tensor):
                    return torch.equal(first, second)
                elif isinstance(first, collections.abc.Iterable):
                    return all(compare(f, s) for f, s in zip(first, second))
                else:
                    return first == second

            # If both torch.* and Tensor.* variants were found, check outputs are identical
            if (output is not None) and (output_method is not None):
                self.assertTrue(type(output) == type(output_method))
                comparison = compare(output, output_method)
                self.assertTrue(
                    comparison, f"torch.{op} result did not match Tensor.{op} result"
                )

            # Compare numerics to Python-side "autocasting" that (we expect) does the same thing
            # as the C++-side autocasting, and should be bitwise accurate.
            output_to_compare = output if output is not None else output_method
            with torch.amp.autocast(device_type=device, enabled=False):
                self.assertFalse(
                    torch.is_autocast_enabled(device_type=device)
                )

                if module is not None and hasattr(module, op):
                    control = getattr(module, op)(
                        *cast(args, run_as_type), **add_kwargs
                    )
                else:
                    control = getattr(args[0].to(run_as_type), op)(
                        *cast(args[1:], run_as_type), **add_kwargs
                    )
                self.assertTrue(type(output_to_compare) == type(control))
                comparison = compare(output_to_compare, control)
                self.assertTrue(comparison, f"torch.{op} result did not match control")
            # Leaving the nested disabled region restores the outer autocast state...
            self.assertTrue(torch.is_autocast_enabled(device_type=device))
        # ...and leaving the outer region disables autocast again.
        self.assertFalse(torch.is_autocast_enabled(device_type=device))
infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/autograd_function_db.py ADDED
@@ -0,0 +1,635 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: ignore-errors
2
+
3
+ import torch
4
+ from functools import partial
5
+ from torch.testing import make_tensor
6
+ from torch.testing._internal.opinfo.core import (
7
+ OpInfo,
8
+ SampleInput,
9
+ )
10
+ from torch.testing._internal.common_dtype import all_types_and
11
+ import numpy as np
12
+
13
+ # Note: [autograd.Function db]
14
+ #
15
+ # This is a collection of autograd.Function test cases written as OpInfos
16
+ # so they can easily be consumed by OpInfo-based tests to check if a subsystem
17
+ # supports autograd.Function.
18
+ #
19
+ # Axes:
20
+ # - saves {output, input, intermediate, non-tensor}
21
+ # - {inputs, output} x {single tensor, tensors, arbitrary objects}
22
+ # - Uses {mark_dirty, mark_non_differentiable, once_differentiable}
23
+
24
+
25
def to_numpy(tensor):
    """Return *tensor*'s values as a NumPy array (moved to host memory first)."""
    host = tensor.cpu()
    return host.numpy()
27
+
28
+
29
class NumpyCube(torch.autograd.Function):
    """Computes (x**3, 3*x**2) by round-tripping through NumPy.

    The second output is the derivative of the first; setup_context saves
    the input and that derivative for backward/jvp.
    """

    @staticmethod
    def forward(input):
        input_np = to_numpy(input)
        dinput = torch.tensor(3 * input_np ** 2, device=input.device)
        return torch.tensor(input_np ** 3, device=input.device), dinput

    @staticmethod
    def setup_context(ctx, inputs, output):
        # Save the input and the derivative output (output[1]) for both
        # reverse-mode (backward) and forward-mode (jvp) AD.
        ctx.save_for_backward(inputs[0], output[1])
        ctx.save_for_forward(inputs[0], output[1])

    @staticmethod
    def backward(ctx, grad_output, grad_saved):
        input, dinput = ctx.saved_tensors
        # d(x**3)/dx = 3*x**2 (= dinput); d(3*x**2)/dx = 6*x.
        # Combine the cotangents of both outputs.
        return NumpyMul.apply(grad_output, dinput) + 6 * NumpyMul.apply(grad_saved, input)

    @staticmethod
    def vmap(info, in_dims, input):
        # Pointwise op: apply unchanged; both outputs keep the input's bdim.
        result = NumpyCube.apply(input)
        return result, (in_dims[0], in_dims[0])

    @staticmethod
    def jvp(ctx, input_tangent):
        input, dinput = ctx.saved_tensors
        # Chain rule for each of the two outputs.
        return NumpyMul.apply(input_tangent, dinput), 6 * NumpyMul.apply(input_tangent, input)
55
+
56
+
57
class CubeGenVmap(torch.autograd.Function):
    """Pure-torch version of NumpyCube with an auto-generated vmap rule.

    forward returns (x**3, 3*x**2); the second output is the derivative of
    the first, and is saved alongside the input for backward/jvp.
    """

    generate_vmap_rule = True

    @staticmethod
    def forward(x):
        return x ** 3, 3 * x ** 2

    @staticmethod
    def setup_context(ctx, inputs, outputs):
        # Save the input and the derivative output for backward/jvp.
        ctx.save_for_backward(inputs[0], outputs[1])
        ctx.save_for_forward(inputs[0], outputs[1])

    @staticmethod
    def backward(ctx, grad_output, grad_saved):
        input, dinput = ctx.saved_tensors
        # d(x**3)/dx = 3*x**2 (= dinput); d(3*x**2)/dx = 6*x.
        # Bug fix: the previous code ignored grad_saved (the cotangent of the
        # second output) and scaled the second term by dinput instead of
        # input; combine both cotangents as NumpyCube.backward does.
        result = grad_output * dinput + 6 * input * grad_saved
        return result

    @staticmethod
    def jvp(ctx, input_tangent):
        input, dinput = ctx.saved_tensors
        return MulGenVmap.apply(input_tangent, dinput), 6 * NumpyMul.apply(input_tangent, input)
79
+
80
+
81
+ def sample_inputs_numpy_cube(opinfo, device, dtype, requires_grad, **kwargs):
82
+ make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
83
+ yield SampleInput(make_arg(1, low=0.8, high=2), args=())
84
+
85
+
86
class NumpyCubeNotComposable(torch.autograd.Function):
    """Like NumpyCube, but the second output is a raw numpy.ndarray.

    State is stashed directly on ctx (not via save_for_backward), which is
    what makes this Function deliberately not composable with transforms.
    """

    @staticmethod
    def forward(input):
        input_np = to_numpy(input)
        return torch.tensor(input_np ** 3, device=input.device), input_np

    @staticmethod
    def setup_context(ctx, inputs, output):
        _, input_np = output
        # Stash the numpy array and the input's device directly on ctx.
        ctx.input_np = input_np
        ctx.device = inputs[0].device

    @staticmethod
    @torch.autograd.function.once_differentiable
    def backward(ctx, grad_output, grad_saved):
        # NOTE(review): grad_output/grad_saved are not used; the returned
        # grad is just 3*x**2 -- presumably only exercised with unit
        # cotangents. Confirm against the consuming OpInfo.
        result_np = 3 * (ctx.input_np ** 2)
        return torch.tensor(result_np, device=ctx.device)
103
+
104
+
105
class NumpyMul(torch.autograd.Function):
    """Elementwise multiply computed by round-tripping through NumPy."""

    @staticmethod
    def forward(x, y):
        return torch.tensor(to_numpy(x) * to_numpy(y), device=x.device)

    @staticmethod
    def setup_context(ctx, inputs, output):
        # Each operand is needed to form the other's gradient/tangent.
        ctx.save_for_backward(*inputs)
        ctx.save_for_forward(*inputs)

    @staticmethod
    def backward(ctx, grad_output):
        x, y = ctx.saved_tensors
        # d(x*y)/dx = y and d(x*y)/dy = x; only compute grads that are needed.
        gx = None
        if ctx.needs_input_grad[0]:
            gx = NumpyMul.apply(grad_output, y)
        gy = None
        if ctx.needs_input_grad[1]:
            gy = NumpyMul.apply(grad_output, x)
        return gx, gy

    @staticmethod
    def vmap(info, in_dims, x, y):
        x_bdim, y_bdim = in_dims
        # Move any batch dim to the trailing position so broadcasting (which
        # aligns trailing dims) lines the batches up; an unbatched operand
        # gets a size-1 trailing dim to broadcast against.
        x = x.movedim(x_bdim, -1) if x_bdim is not None else x.unsqueeze(-1)
        y = y.movedim(y_bdim, -1) if y_bdim is not None else y.unsqueeze(-1)
        result = NumpyMul.apply(x, y)
        # Report the batch dim back at position 0.
        result = result.movedim(-1, 0)
        return result, 0

    @staticmethod
    def jvp(ctx, x_tangent, y_tangent):
        x, y = ctx.saved_tensors
        # Product rule: d(x*y) = dx*y + x*dy.
        return x_tangent * y + y_tangent * x
139
+
140
+ def sample_inputs_numpy_mul(opinfo, device, dtype, requires_grad, **kwargs):
141
+ make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
142
+ # Broadcasting
143
+ yield SampleInput(make_arg(4, low=0.9, high=2), args=(make_arg(3, 4, low=0.9, high=2),))
144
+
145
+ def sample_inputs_numpy_mul_scalar(opinfo, device, dtype, requires_grad, **kwargs):
146
+ make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
147
+ yield SampleInput(make_arg(4, low=0.9, high=2), args=(), kwargs={"scalar": 3.14})
148
+
149
class MulGenVmap(torch.autograd.Function):
    """Differentiable elementwise multiply whose vmap rule is auto-generated."""

    generate_vmap_rule = True

    @staticmethod
    def forward(x, y):
        return x * y

    @staticmethod
    def setup_context(ctx, inputs, outputs):
        # Each operand is needed to form the other's gradient/tangent.
        ctx.save_for_backward(*inputs)
        ctx.save_for_forward(*inputs)

    @staticmethod
    def backward(ctx, grad_output):
        lhs, rhs = ctx.saved_tensors
        # d(x*y)/dx = y, d(x*y)/dy = x; skip grads that aren't required.
        grad_lhs = MulGenVmap.apply(grad_output, rhs) if ctx.needs_input_grad[0] else None
        grad_rhs = MulGenVmap.apply(grad_output, lhs) if ctx.needs_input_grad[1] else None
        return grad_lhs, grad_rhs

    @staticmethod
    def jvp(ctx, x_tangent, y_tangent):
        lhs, rhs = ctx.saved_tensors
        # Product rule: d(x*y) = dx*y + x*dy.
        return x_tangent * rhs + y_tangent * lhs
176
+
177
+
178
class NumpyExp_(torch.autograd.Function):
    """In-place exp implemented through NumPy (mutates its input)."""

    @staticmethod
    def forward(x):
        x_np = to_numpy(x)
        # np.exp with an explicit out argument writes into x_np; for CPU
        # tensors x_np shares x's storage, so x is mutated in place.
        np.exp(x_np, x_np)
        return x

    @staticmethod
    def setup_context(ctx, inputs, output):
        x, = inputs
        # Tell autograd the input was modified in place.
        ctx.mark_dirty(x)
        # The output (== exp(x)) is exactly the derivative we need later.
        ctx.save_for_backward(output)
        ctx.save_for_forward(output)

    @staticmethod
    def backward(ctx, grad_output):
        # d(exp(x))/dx = exp(x), i.e. the saved output.
        output, = ctx.saved_tensors
        return NumpyMul.apply(grad_output, output)

    @staticmethod
    def vmap(info, in_dims, x):
        # In-place pointwise op: mutate, then return x with its original bdim.
        NumpyExp_.apply(x)
        return x, in_dims[0]

    @staticmethod
    def jvp(ctx, x_tangent):
        # Doesn't call numpy operations because I didn't want to write NumpyMul_
        output, = ctx.saved_tensors
        x_tangent.mul_(output)
        return x_tangent
208
+
209
class NumpySort(torch.autograd.Function):
    """Sorts x along `dim` via NumPy.

    Returns (sorted_x, ind, ind_inv) where ind are the argsort indices and
    ind_inv is their inverse permutation (used to route gradients back).
    """

    @staticmethod
    def forward(x, dim):
        device = x.device
        x = to_numpy(x)
        ind = np.argsort(x, axis=dim)
        ind_inv = np.argsort(ind, axis=dim)
        result = np.take_along_axis(x, ind, axis=dim)
        return (
            # Bug fix: return the sorted values (`result`); previously this
            # returned the unsorted input `x` and left `result` unused.
            torch.tensor(result, device=device),
            torch.tensor(ind, device=device),
            torch.tensor(ind_inv, device=device),
        )

    @staticmethod
    def setup_context(ctx, inputs, output):
        x, dim = inputs
        _, ind, ind_inv = output
        # The integral index outputs are not differentiable.
        ctx.mark_non_differentiable(ind, ind_inv)
        ctx.save_for_backward(ind, ind_inv)
        ctx.save_for_forward(ind, ind_inv)
        ctx.dim = dim

    @staticmethod
    def backward(ctx, grad_output, _0, _1):
        ind, ind_inv = ctx.saved_tensors
        # Un-sort the cotangent by gathering along the inverse permutation.
        return NumpyTake.apply(grad_output, ind_inv, ind, ctx.dim), None

    @staticmethod
    def vmap(info, in_dims, x, dim):
        x_bdim, _ = in_dims
        x = x.movedim(x_bdim, 0)
        # wrap dim (logical rank is x.dim() - 1 once the batch dim is at 0)
        dim = dim if dim >= 0 else dim + x.dim() - 1
        return NumpySort.apply(x, dim + 1), (0, 0, 0)

    @staticmethod
    def jvp(ctx, x_tangent, _):
        ind, ind_inv = ctx.saved_tensors
        # The tangent of a sort is the tangent gathered by the same permutation.
        return NumpyTake.apply(x_tangent, ind, ind_inv, ctx.dim), None, None
249
+
250
class SortGenVmap(torch.autograd.Function):
    """Sorts x along `dim` with pure torch ops; vmap rule is auto-generated.

    Returns (sorted_x, ind, ind_inv); ind_inv is the inverse argsort
    permutation, used by backward/jvp to route (co)tangents.
    """

    generate_vmap_rule = True

    @staticmethod
    def forward(x, dim):
        # (Removed dead local `device = x.device` -- it was never used.)
        ind = torch.argsort(x, dim=dim)
        ind_inv = torch.argsort(ind, axis=dim)
        result = torch.take_along_dim(x, ind, dim=dim)
        return result, ind, ind_inv

    @staticmethod
    def setup_context(ctx, inputs, outputs):
        x, dim = inputs
        _, ind, ind_inv = outputs
        # The integral index outputs are not differentiable.
        ctx.mark_non_differentiable(ind, ind_inv)
        ctx.save_for_backward(ind, ind_inv)
        ctx.save_for_forward(ind, ind_inv)
        ctx.dim = dim

    @staticmethod
    def backward(ctx, grad_output, _0, _1):
        ind, ind_inv = ctx.saved_tensors
        # Un-sort the cotangent by gathering along the inverse permutation.
        return TakeGenVmap.apply(grad_output, ind_inv, ind, ctx.dim), None

    @staticmethod
    def jvp(ctx, x_tangent, _):
        ind, ind_inv = ctx.saved_tensors
        return TakeGenVmap.apply(x_tangent, ind, ind_inv, ctx.dim), None, None
279
+
280
+
281
+ def sample_inputs_numpy_sort(opinfo, device, dtype, requires_grad, **kwargs):
282
+ make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
283
+ yield SampleInput(make_arg(3, 5), args=(1,))
284
+
285
+
286
def sample_inputs_numpy_take(opinfo, device, dtype, requires_grad, **kwargs):
    """Yield one NumpyTake sample: a (3, 5) tensor plus its sort indices
    (and their inverse) along dim 1."""
    tensor = make_tensor(3, 5, device=device, dtype=dtype, requires_grad=requires_grad)
    dim = 1
    _, ind, ind_inv = NumpySort.apply(tensor, 1)
    yield SampleInput(tensor, args=(ind, ind_inv, dim))
292
+
293
+
294
class NumpyTake(torch.autograd.Function):
    """take_along_axis through NumPy.

    ind_inv (the inverse permutation of ind) is accepted and saved so that
    backward can gather the cotangent back to the input layout.
    """

    @staticmethod
    def forward(x, ind, ind_inv, dim):
        device = x.device
        x = to_numpy(x)
        ind = to_numpy(ind)
        return torch.tensor(np.take_along_axis(x, ind, dim), device=device)

    @staticmethod
    def setup_context(ctx, inputs, output):
        x, ind, ind_inv, dim = inputs
        ctx.save_for_backward(ind, ind_inv)
        ctx.save_for_forward(ind, ind_inv)
        ctx.dim = dim

    @staticmethod
    def backward(ctx, grad_output):
        ind, ind_inv = ctx.saved_tensors
        # Gathering along ind is undone by gathering along its inverse.
        result = NumpyTake.apply(grad_output, ind_inv, ind, ctx.dim)
        return result, None, None, None

    @staticmethod
    def vmap(info, in_dims, x, ind, ind_inv, dim):
        x_bdim, ind_bdim, ind_inv_bdim, _ = in_dims

        # wrap dim
        # NOTE(review): when x is batched, the logical rank is taken to be
        # x_bdim - 1 -- presumably relying on where the bdim sits for the
        # inputs this sees; confirm for arbitrary x_bdim positions.
        logical_dim = x.dim() if x_bdim is None else x_bdim - 1
        dim = dim if dim >= 0 else dim + logical_dim

        def expand_bdim(x, x_bdim):
            # Materialize a batch dim at position 0 (broadcasting if absent).
            if x_bdim is None:
                return x.expand(info.batch_size, *x.shape)
            return x.movedim(x_bdim, 0)

        x = expand_bdim(x, x_bdim)
        ind = expand_bdim(ind, ind_bdim)
        ind_inv = expand_bdim(ind_inv, ind_inv_bdim)

        # All batch dims are now at 0, so shift the take dim by one.
        return NumpyTake.apply(x, ind, ind_inv, dim + 1), 0

    @staticmethod
    def jvp(ctx, x_tangent, ind_tangent, ind_inv_tangent, _):
        # Index inputs are integral and carry no tangents.
        assert ind_tangent is None
        assert ind_inv_tangent is None
        ind, ind_inv = ctx.saved_tensors
        return NumpyTake.apply(x_tangent, ind, ind_inv, ctx.dim)
340
+
341
class TakeGenVmap(torch.autograd.Function):
    """``take_along_dim`` with the autograd-generated vmap rule.

    ``ind_inv`` is unused in forward; it is saved so backward can gather
    the gradient back through the inverse permutation.
    """
    generate_vmap_rule = True

    @staticmethod
    def forward(x, ind, ind_inv, dim):
        return torch.take_along_dim(x, ind, dim)

    @staticmethod
    def setup_context(ctx, inputs, outputs):
        x, ind, ind_inv, dim = inputs
        ctx.save_for_backward(ind, ind_inv)
        ctx.save_for_forward(ind, ind_inv)
        ctx.dim = dim

    @staticmethod
    def backward(ctx, grad_output):
        ind, ind_inv = ctx.saved_tensors
        # ind and ind_inv swap roles relative to forward.
        result = TakeGenVmap.apply(grad_output, ind_inv, ind, ctx.dim)
        return result, None, None, None

    @staticmethod
    def jvp(ctx, x_tangent, ind_tangent, ind_inv_tangent, _):
        ind, ind_inv = ctx.saved_tensors
        return TakeGenVmap.apply(x_tangent, ind, ind_inv, ctx.dim)
365
+
366
class Select(torch.autograd.Function):
    """``x[idx]`` as an autograd.Function with a hand-written vmap rule."""

    @staticmethod
    def forward(x, idx):
        return x[idx]

    @staticmethod
    def setup_context(ctx, inputs, output):
        tensor, index = inputs
        ctx.x_shape = tensor.shape
        ctx.idx = index

    @staticmethod
    def backward(ctx, grad_output):
        # Scatter the output gradient back into a zero tensor of x's shape.
        grad_x = grad_output.new_zeros(ctx.x_shape)
        grad_x[ctx.idx] = grad_output
        return grad_x, None

    @staticmethod
    def vmap(info, in_dims, x, idx):
        x_bdim, _ = in_dims
        # Push the batch dim to position 1 so indexing dim 0 still selects
        # along the logical dim; the result's dim 0 is then the batch dim.
        batched = x.movedim(x_bdim, 1)
        return Select.apply(batched, idx), 0

    @staticmethod
    def jvp(ctx, x_tangent, _):
        return Select.apply(x_tangent, ctx.idx)
392
+
393
class SelectGenVmap(torch.autograd.Function):
    """``x[idx]`` relying on the autograd-generated vmap rule."""
    generate_vmap_rule = True

    @staticmethod
    def forward(x, idx):
        return x[idx]

    @staticmethod
    def setup_context(ctx, inputs, outputs):
        tensor, index = inputs
        ctx.x_shape = tensor.shape
        ctx.idx = index

    @staticmethod
    def backward(ctx, grad_output):
        # Scatter the output gradient back into a zero tensor of x's shape.
        grad_x = grad_output.new_zeros(ctx.x_shape)
        grad_x[ctx.idx] = grad_output
        return grad_x, None

    @staticmethod
    def jvp(ctx, x_tangent, _):
        return SelectGenVmap.apply(x_tangent, ctx.idx)
415
+
416
+
417
+ def sample_inputs_select(opinfo, device, dtype, requires_grad, **kwargs):
418
+ make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
419
+ yield SampleInput(make_arg(3, 5), args=(2,))
420
+
421
class ScaleGradGenVmap(torch.autograd.Function):
    """Identity forward whose backward/jvp scale gradients by ``scale``."""
    generate_vmap_rule = True
    scale = 3.14

    @staticmethod
    def forward(x):
        return x.clone()

    @staticmethod
    def setup_context(ctx, inputs, outputs):
        # Nothing to save: the gradient rules only use the class constant.
        pass

    @staticmethod
    def backward(ctx, grad_output):
        return ScaleGradGenVmap.scale * grad_output

    @staticmethod
    def jvp(ctx, x_tangent):
        return ScaleGradGenVmap.scale * x_tangent
440
+
441
class ZeroGradientsGenVmap(torch.autograd.Function):
    """Two-input identity clone whose gradient rules return zero tensors.

    The gradients are deliberately built with ``torch.zeros`` (not
    ``zeros_like``/``new_zeros``) and the first backward gradient is
    deliberately over-sized, to exercise the generated vmap rule's handling
    of oddly-constructed gradients.
    """
    generate_vmap_rule = True

    @staticmethod
    def forward(x, y):
        return x.clone(), y.clone()

    @staticmethod
    def setup_context(ctx, inputs, outputs):
        pass

    @staticmethod
    def backward(ctx, gx, gy):
        # Intentionally returning torch.zeros instead of zeros_like or new_zeros.
        # Also intentionally not None.
        return (
            # Intentionally too-large gradient
            torch.zeros(3, 4, *gx.shape, dtype=gx.dtype, device=gx.device),
            torch.zeros(gy.shape, dtype=gy.dtype, device=gy.device),
        )

    @staticmethod
    def jvp(ctx, gx, gy):
        # Intentionally returning torch.zeros instead of zeros_like or new_zeros.
        # Also intentionally not None.
        return (
            torch.zeros(gx.shape, dtype=gx.dtype, device=gx.device),
            torch.zeros(gy.shape, dtype=gy.dtype, device=gy.device),
        )
470
+
471
+
472
+ def sample_inputs_forward_default_args(opinfo, device, dtype, requires_grad, **kwargs):
473
+ make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
474
+ yield SampleInput(make_arg(3, 5))
475
+
476
+
477
class ForwardHasDefaultArgs(torch.autograd.Function):
    """Like ``Select`` but with a defaulted ``idx`` argument.

    Exercises autograd.Function machinery when ``forward`` has a default
    argument value.
    """

    @staticmethod
    def forward(x, idx=(2,)):
        return x[idx]

    @staticmethod
    def setup_context(ctx, inputs, output):
        x, idx = inputs
        ctx.x_shape = x.shape
        ctx.idx = idx

    @staticmethod
    def backward(ctx, grad_output):
        # Scatter the gradient back into a zero tensor of x's shape.
        result = grad_output.new_zeros(ctx.x_shape)
        result[ctx.idx] = grad_output
        return result, None

    @staticmethod
    def vmap(info, in_dims, x, idx):
        x_bdim, _ = in_dims
        # Move the batch dim to position 1 so indexing dim 0 stays logical;
        # the output's batch dim then lands at 0.
        x = x.movedim(x_bdim, 1)
        return ForwardHasDefaultArgs.apply(x, idx), 0

    @staticmethod
    def jvp(ctx, x_tangent, _):
        return ForwardHasDefaultArgs.apply(x_tangent, ctx.idx)
503
+
504
+
505
# OpInfo registry for the custom autograd.Function operators defined in this
# file, consumed by the generic OpInfo-based test suites. The *GenVmap entries
# use `generate_vmap_rule = True`; the sort entries use a gradcheck_wrapper to
# drop the non-differentiable index outputs.
autograd_function_db = [
    OpInfo(
        'NumpyCubeAutogradFunction',
        op=NumpyCube.apply,
        supports_forward_ad=True,
        supports_fwgrad_bwgrad=True,
        sample_inputs_func=sample_inputs_numpy_cube,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
    ),
    OpInfo(
        'NumpyExpMarkDirtyAutogradFunction',
        # The out-of-place variant clones first because NumpyExp_ mutates input.
        op=lambda x: NumpyExp_.apply(x.clone()),
        inplace_variant=NumpyExp_.apply,
        supports_forward_ad=True,
        supports_fwgrad_bwgrad=True,
        sample_inputs_func=sample_inputs_numpy_cube,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
    ),
    OpInfo(
        'NumpyMulAutogradFunction',
        op=NumpyMul.apply,
        supports_forward_ad=True,
        supports_fwgrad_bwgrad=True,
        sample_inputs_func=sample_inputs_numpy_mul,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
    ),
    OpInfo(
        'NumpyCubeNotComposableAutogradFunction',
        op=lambda x: NumpyCubeNotComposable.apply(x)[0],
        supports_forward_ad=False,
        supports_fwgrad_bwgrad=False,
        sample_inputs_func=sample_inputs_numpy_cube,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
    ),
    OpInfo(
        'NumpySortAutogradFunction',
        op=NumpySort.apply,
        supports_forward_ad=False,
        supports_fwgrad_bwgrad=False,
        sample_inputs_func=sample_inputs_numpy_sort,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
        # Only the sorted values are differentiable; drop the index outputs.
        gradcheck_wrapper=lambda y, ind: y,
    ),
    OpInfo(
        'NumpyTakeAutogradFunction',
        op=NumpyTake.apply,
        supports_forward_ad=False,
        supports_fwgrad_bwgrad=False,
        sample_inputs_func=sample_inputs_numpy_take,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
    ),
    OpInfo(
        'SelectAutogradFunction',
        op=Select.apply,
        supports_forward_ad=True,
        supports_fwgrad_bwgrad=True,
        sample_inputs_func=sample_inputs_select,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
    ),
    OpInfo(
        'CubeGenVmapAutogradFunction',
        op=CubeGenVmap.apply,
        supports_forward_ad=True,
        supports_fwgrad_bwgrad=True,
        sample_inputs_func=sample_inputs_numpy_cube,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
    ),
    OpInfo(
        'MulGenVmapAutogradFunction',
        op=MulGenVmap.apply,
        supports_forward_ad=True,
        supports_fwgrad_bwgrad=True,
        sample_inputs_func=sample_inputs_numpy_mul,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
    ),
    OpInfo(
        'SortGenVmapAutogradFunction',
        op=SortGenVmap.apply,
        supports_forward_ad=True,
        supports_fwgrad_bwgrad=True,
        sample_inputs_func=sample_inputs_numpy_sort,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
        gradcheck_wrapper=lambda y, ind: y,
    ),
    OpInfo(
        'SelectGenVmapAutogradFunction',
        op=SelectGenVmap.apply,
        supports_forward_ad=True,
        supports_fwgrad_bwgrad=True,
        sample_inputs_func=sample_inputs_select,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
    ),
    OpInfo(
        'ScaleGradGenVmapAutogradFunction',
        op=ScaleGradGenVmap.apply,
        supports_forward_ad=True,
        supports_fwgrad_bwgrad=True,
        sample_inputs_func=sample_inputs_numpy_cube,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
    ),
    OpInfo(
        'ZeroGradientsGenVmapAutogradFunction',
        op=ZeroGradientsGenVmap.apply,
        supports_forward_ad=True,
        supports_fwgrad_bwgrad=True,
        sample_inputs_func=sample_inputs_numpy_mul,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
    ),
    OpInfo(
        'ForwardHasDefaultArgsAutogradFunction',
        op=ForwardHasDefaultArgs.apply,
        supports_forward_ad=True,
        supports_fwgrad_bwgrad=True,
        sample_inputs_func=sample_inputs_forward_default_args,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
    ),
]
infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/check_kernel_launches.py ADDED
@@ -0,0 +1,165 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: ignore-errors
2
+
3
+ import os
4
+ import re
5
+ import sys
6
+ from typing import List
7
+
8
# Public API of this checker module.
__all__ = [
    "check_code_for_cuda_kernel_launches",
    "check_cuda_kernel_launches",
]

# FILES TO EXCLUDE (match is done with suffix using `endswith`)
# You wouldn't drive without a seatbelt, though, so why would you
# launch a kernel without some safety? Use this as a quick workaround
# for a problem with the checker, fix the checker, then de-exclude
# the files in question.
exclude_files: List[str] = []

# Without using a C++ AST we can't 100% detect kernel launches, so we
# model them as having the pattern "<<<parameters>>>(arguments);"
# We then require that `C10_CUDA_KERNEL_LAUNCH_CHECK` be
# the next statement.
#
# We model the next statement as ending at the next `}` or `;`.
# If we see `}` then a clause ended (bad) if we see a semi-colon then
# we expect the launch check just before it.
#
# Since the kernel launch can include lambda statements, it's important
# to find the correct end-paren of the kernel launch. Doing this with
# pure regex requires recursive regex, which aren't part of the Python
# standard library. To avoid an additional dependency, we build a prefix
# regex that finds the start of a kernel launch, use a paren-matching
# algorithm to find the end of the launch, and then another regex to
# determine if a launch check is present.

# Finds potential starts of kernel launches
kernel_launch_start = re.compile(
    r"^.*<<<[^>]+>>>\s*\(", flags=re.MULTILINE
)

# This pattern should start at the character after the final paren of the
# kernel launch. It returns a match if the launch check is not the next statement
has_check = re.compile(
    r"\s*;(?![^;}]*C10_CUDA_KERNEL_LAUNCH_CHECK\(\);)", flags=re.MULTILINE
)
47
+
48
def find_matching_paren(s: str, startpos: int) -> int:
    """Given a string "prefix (unknown number of characters) suffix"
    and the position of the first `(` returns the index of the character
    1 past the `)`, accounting for paren nesting.

    Args:
        s: The string to scan; ``s[startpos]`` is expected to be ``(``.
        startpos: Index of the opening paren.

    Returns:
        Index one past the matching closing paren.

    Raises:
        IndexError: If the parens are unbalanced (no matching ``)``).
    """
    depth = 0
    # Iterate by index rather than `enumerate(s[startpos:])` so we don't
    # copy the (potentially large) tail of the source file on every call.
    for i in range(startpos, len(s)):
        c = s[i]
        if c == '(':
            depth += 1
        elif c == ')':
            depth -= 1
            if depth == 0:
                return i + 1

    raise IndexError("Closing parens not found!")
63
+
64
+
65
def should_exclude_file(filename) -> bool:
    """Return True if ``filename`` ends with any suffix in ``exclude_files``."""
    return any(filename.endswith(suffix) for suffix in exclude_files)
70
+
71
+
72
def check_code_for_cuda_kernel_launches(code, filename=None):
    """Checks code for CUDA kernel launches without cuda error checks.

    Args:
        filename - Filename of file containing the code. Used only for display
                   purposes, so you can put anything here.
        code - The code to check

    Returns:
        The number of unsafe kernel launches in the code
    """
    if filename is None:
        filename = "##Python Function Call##"

    # We break the code apart and put it back together to add
    # helpful line numberings for identifying problem areas
    code = enumerate(code.split("\n"))  # Split by line breaks
    code = [f"{lineno}: {linecode}" for lineno, linecode in code]  # Number the lines
    code = '\n'.join(code)  # Put it back together

    num_launches_without_checks = 0
    for m in kernel_launch_start.finditer(code):
        end_paren = find_matching_paren(code, m.end() - 1)
        if has_check.match(code, end_paren):
            num_launches_without_checks += 1
            context = code[m.start():end_paren + 1]
            # BUG FIX: report the actual filename. Previously the message
            # hard-coded '(unknown)' and the `filename` computed above
            # (including its default) was never used.
            print(
                f"Missing C10_CUDA_KERNEL_LAUNCH_CHECK in '{filename}'. Context:\n{context}",
                file=sys.stderr,
            )

    return num_launches_without_checks
101
+
102
+
103
def check_file(filename):
    """Checks a file for CUDA kernel launches without cuda error checks.

    Args:
        filename - File to check

    Returns:
        The number of unsafe kernel launches in the file
    """
    # Only CUDA sources are scanned, and excluded files are skipped outright.
    if not filename.endswith((".cu", ".cuh")):
        return 0
    if should_exclude_file(filename):
        return 0
    with open(filename) as source:
        return check_code_for_cuda_kernel_launches(source.read(), filename)
120
+
121
+
122
def check_cuda_kernel_launches():
    """Checks all pytorch code for CUDA kernel launches without cuda error checks

    Returns:
        The number of unsafe kernel launches in the codebase
    """
    # Walk up from this file to the repository root (two parents up).
    torch_dir = os.path.dirname(os.path.realpath(__file__))
    torch_dir = os.path.dirname(torch_dir)  # Go up to parent torch
    torch_dir = os.path.dirname(torch_dir)  # Go up to parent caffe2

    kernels_without_checks = 0
    files_without_checks = []
    for root, dirnames, filenames in os.walk(torch_dir):
        # `$BASE/build` and `$BASE/torch/include` are generated
        # so we don't want to flag their contents
        # NOTE(review): "torch/include" is joined with a forward slash, so
        # this exclusion presumably only matches on POSIX -- confirm if
        # Windows support is needed.
        if root == os.path.join(torch_dir, "build") or root == os.path.join(torch_dir, "torch/include"):
            # Curtail search by modifying dirnames and filenames in place
            # Yes, this is the way to do this, see `help(os.walk)`
            dirnames[:] = []
            continue

        for x in filenames:
            filename = os.path.join(root, x)
            file_result = check_file(filename)
            if file_result > 0:
                kernels_without_checks += file_result
                files_without_checks.append(filename)

    if kernels_without_checks > 0:
        count_str = f"Found {kernels_without_checks} instances in " \
                    f"{len(files_without_checks)} files where kernel " \
                    "launches didn't have checks."
        # The summary is printed before and after the file list so it is
        # visible regardless of how the log is truncated.
        print(count_str, file=sys.stderr)
        print("Files without checks:", file=sys.stderr)
        for x in files_without_checks:
            print(f"\t{x}", file=sys.stderr)
        print(count_str, file=sys.stderr)

    return kernels_without_checks
161
+
162
+
163
+ if __name__ == "__main__":
164
+ unsafe_launches = check_cuda_kernel_launches()
165
+ sys.exit(0 if unsafe_launches == 0 else 1)
infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/codegen/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ # mypy: ignore-errors
infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/codegen/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (189 Bytes). View file
 
infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/common_dist_composable.py ADDED
@@ -0,0 +1,111 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: ignore-errors
2
+
3
+ # Owner(s): ["oncall: distributed"]
4
+
5
+ from typing import Tuple
6
+
7
+ import torch
8
+ import torch.nn as nn
9
+
10
+
11
class UnitModule(nn.Module):
    """Linear -> (ReLU, Linear, ReLU) -> Linear, all layers 100 -> 100."""

    def __init__(self, device: torch.device):
        super().__init__()
        self.l1 = nn.Linear(100, 100, device=device)
        self.seq = nn.Sequential(
            nn.ReLU(),
            nn.Linear(100, 100, device=device),
            nn.ReLU(),
        )
        self.l2 = nn.Linear(100, 100, device=device)

    def forward(self, x):
        hidden = self.l1(x)
        hidden = self.seq(hidden)
        return self.l2(hidden)
+ return self.l2(self.seq(self.l1(x)))
24
+
25
+
26
class CompositeModel(nn.Module):
    """Two Linear layers sandwiching two ``UnitModule`` blocks."""

    def __init__(self, device: torch.device):
        super().__init__()
        self.l1 = nn.Linear(100, 100, device=device)
        self.u1 = UnitModule(device)
        self.u2 = UnitModule(device)
        self.l2 = nn.Linear(100, 100, device=device)

    def forward(self, x):
        out = self.l1(x)
        out = self.u1(out)
        out = self.u2(out)
        return self.l2(out)
+ return self.l2(self.u2(self.u1(self.l1(x))))
36
+
37
+
38
class UnitParamModule(nn.Module):
    """Like ``UnitModule`` but ends with a matmul against a raw Parameter."""

    def __init__(self, device: torch.device):
        super().__init__()
        self.l = nn.Linear(100, 100, device=device)
        self.seq = nn.Sequential(
            nn.ReLU(),
            nn.Linear(100, 100, device=device),
            nn.ReLU(),
        )
        self.p = nn.Parameter(torch.randn((100, 100), device=device))

    def forward(self, x):
        hidden = self.seq(self.l(x))
        return torch.mm(hidden, self.p)
+ return torch.mm(self.seq(self.l(x)), self.p)
51
+
52
+
53
class CompositeParamModel(nn.Module):
    """Composite model with a raw Parameter, a persistent buffer, and two UnitModules."""

    def __init__(self, device: torch.device):
        super().__init__()
        self.l = nn.Linear(100, 100, device=device)
        self.u1 = UnitModule(device)
        self.u2 = UnitModule(device)
        self.p = nn.Parameter(torch.randn((100, 100), device=device))
        # The buffer participates in state_dict but not in forward.
        self.register_buffer(
            "buffer", torch.randn((100, 100), device=device), persistent=True
        )

    def forward(self, x):
        features = self.u2(self.u1(self.l(x)))
        weight = self.p
        return torch.mm(features, weight)
+ return torch.mm(a, b)
68
+
69
+
70
class FakeSequential(nn.Module):
    # Define this class to achieve a desired nested wrapping using the module
    # wrap policy with `nn.Sequential`
    def __init__(self, *modules: Tuple[nn.Module, ...]) -> None:
        super().__init__()
        # Deliberately a plain list: the wrapped modules are NOT registered
        # as submodules of this container.
        self._module_sequence = list(modules)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        out = x
        for stage in self._module_sequence:
            out = stage(out)
        return out
+
82
+
83
class NestedSequentialModel(nn.Module):
    """Deeply nested mix of ``nn.Sequential`` and ``FakeSequential``.

    The exact nesting is the point of this fixture: it exercises wrap-policy
    traversal order, so the structure should not be simplified.
    """

    def __init__(self, device: torch.device) -> None:
        super().__init__()
        # This nested structure exercises traversal order to catch differences
        # between valid traversals (e.g. BFS and DFS variations).
        self.seq1 = nn.Sequential(
            nn.Linear(1, 1, device=device),
            FakeSequential(
                nn.Linear(1, 1, device=device),
                nn.ReLU(),
                FakeSequential(
                    nn.Linear(1, 1, device=device),
                ),
                nn.ReLU(),
            ),
            nn.Linear(1, 2, device=device),
        )
        self.lin = nn.Linear(2, 2, device=device)
        self.seq2 = nn.Sequential(
            nn.ReLU(),
            nn.Linear(2, 3, device=device),
            FakeSequential(
                nn.Linear(3, 2, bias=False, device=device),
                nn.Linear(2, 4, bias=False, device=device),
            ),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Feature sizes flow 1 -> 2 -> 2 -> 4 across seq1, lin, seq2.
        return self.seq2(self.lin(self.seq1(x)))
infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/common_distributed.py ADDED
@@ -0,0 +1,1422 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: ignore-errors
2
+
3
+ import abc
4
+ import faulthandler
5
+ import itertools
6
+ import logging
7
+ import multiprocessing
8
+ import os
9
+ import queue
10
+ import subprocess
11
+ import sys
12
+ import tempfile
13
+ import threading
14
+ import time
15
+ import traceback
16
+ import types
17
+ import unittest
18
+ from contextlib import contextmanager
19
+ from dataclasses import dataclass
20
+ from datetime import timedelta
21
+ from enum import Enum
22
+ from functools import partial, reduce, wraps
23
+ from io import StringIO
24
+ from typing import Dict, NamedTuple, Optional, Union, List, Any, Callable, Tuple
25
+ from unittest.mock import patch
26
+
27
+ import torch
28
+ import torch._dynamo.test_case
29
+ import torch.cuda.nccl
30
+ import torch.distributed as c10d
31
+ import torch.nn as nn
32
+ from torch.testing._internal.common_utils import (
33
+ FILE_SCHEMA,
34
+ find_free_port,
35
+ IS_SANDCASTLE,
36
+ retry_on_connect_failures,
37
+ skip_but_pass_in_sandcastle,
38
+ skip_but_pass_in_sandcastle_if,
39
+ TEST_WITH_ROCM,
40
+ TEST_WITH_TSAN,
41
+ TestCase,
42
+ run_tests,
43
+ )
44
+ from torch.testing._internal.distributed.multi_threaded_pg import (
45
+ _install_threaded_pg,
46
+ _uninstall_threaded_pg,
47
+ ProcessLocalGroup,
48
+ )
49
+ import operator
50
+
51
+ logging.basicConfig(level=logging.INFO)
52
+ logger = logging.getLogger(__name__)
53
+
54
+
55
class TestSkip(NamedTuple):
    # A recognized skip: the subprocess exits with `exit_code` and the parent
    # maps it back to the human-readable `message`.
    exit_code: int
    message: str
58
+
59
+
60
# Registry of skip reasons. Test subprocesses signal a skip by exiting with
# the associated exit code; the multiprocess harness translates it back into
# the message below.
TEST_SKIPS = {
    "backend_unavailable": TestSkip(
        72, "Skipped because distributed backend is not available."
    ),
    "small_worldsize": TestSkip(73, "Skipped due to small world size."),
    "odd_worldsize": TestSkip(87, "Skipped due to odd world size."),
    "no_cuda": TestSkip(74, "CUDA is not available."),
    "multi-gpu-1": TestSkip(75, "Need at least 1 CUDA device"),
    "multi-gpu-2": TestSkip(77, "Need at least 2 CUDA devices"),
    "multi-gpu-3": TestSkip(80, "Need at least 3 CUDA devices"),
    "multi-gpu-4": TestSkip(81, "Need at least 4 CUDA devices"),
    "multi-gpu-5": TestSkip(82, "Need at least 5 CUDA devices"),
    "multi-gpu-6": TestSkip(83, "Need at least 6 CUDA devices"),
    "multi-gpu-7": TestSkip(84, "Need at least 7 CUDA devices"),
    "multi-gpu-8": TestSkip(85, "Need at least 8 CUDA devices"),
    "nccl": TestSkip(76, "c10d not compiled with NCCL support"),
    "skipIfRocm": TestSkip(78, "Test skipped for ROCm"),
    "no_peer_access": TestSkip(79, "Test skipped because no GPU peer access"),
    "generic": TestSkip(
        86, "Test skipped at subprocess level, look at subprocess log for skip reason"
    ),
    "importerror": TestSkip(88, "Test skipped due to missing import"),
}
83
+
84
+
85
@dataclass
class DistTestCases:
    """Namespace of backend capability tables for distributed tests."""
    # NOTE(review): these dicts have no annotations, so they are *class*
    # attributes shared by all instances -- presumably intended, as the class
    # is used as a lookup namespace.
    # Backends that do not support a specific collective
    skip_collective = {}
    skip_collective["allgather_coalesced"] = {"nccl", "mpi", "ucc"}
    skip_collective["reduce"] = set()
    skip_collective["sendrecv anysource"] = {"nccl", "ucc"}
    skip_collective["cpu barrier"] = {"nccl", "ucc"}

    # Sets showing that something is implemented
    backend_feature = {}
    backend_feature["gpu"] = {"nccl", "gloo", "ucc"}
    backend_feature["cuda"] = {"nccl", "gloo", "ucc"}
    backend_feature["ddp"] = {"nccl", "gloo", "ucc"}
    backend_feature["subgroup"] = {"nccl", "gloo", "ucc"}
    backend_feature["plugin"] = set()
102
+
103
def skip_if_no_gpu(func):
    """Skips if the world size exceeds the number of GPUs, ensuring that if the
    test is run, each rank has its own GPU via ``torch.cuda.device(rank)``."""

    @wraps(func)
    def checked(*args, **kwargs):
        if not torch.cuda.is_available():
            sys.exit(TEST_SKIPS["no_cuda"].exit_code)
        required = int(os.environ["WORLD_SIZE"])
        if torch.cuda.device_count() < required:
            sys.exit(TEST_SKIPS[f"multi-gpu-{required}"].exit_code)
        return func(*args, **kwargs)

    return checked
118
+
119
+
120
def skip_if_small_worldsize(func):
    """Skip (via exit code) when a non-MPI backend runs with world size <= 2."""

    @wraps(func)
    def guarded(*args, **kwargs):
        non_mpi = os.environ["BACKEND"] != "mpi"
        if non_mpi and int(os.environ["WORLD_SIZE"]) <= 2:
            sys.exit(TEST_SKIPS["small_worldsize"].exit_code)
        return func(*args, **kwargs)

    return guarded
129
+
130
+
131
def skip_if_odd_worldsize(func):
    """Skip (via exit code) when a non-MPI backend runs with an odd world size."""

    @wraps(func)
    def guarded(*args, **kwargs):
        non_mpi = os.environ["BACKEND"] != "mpi"
        if non_mpi and int(os.environ["WORLD_SIZE"]) % 2 == 1:
            sys.exit(TEST_SKIPS["odd_worldsize"].exit_code)
        return func(*args, **kwargs)

    return guarded
140
+
141
+
142
def require_n_gpus_for_nccl_backend(n, backend):
    """Skip the wrapped test when ``backend`` is nccl and fewer than ``n`` GPUs exist."""

    def decorator(func):
        @wraps(func)
        def guarded(*args, **kwargs):
            if backend == "nccl" and torch.cuda.device_count() < n:
                sys.exit(TEST_SKIPS[f"multi-gpu-{n}"].exit_code)
            return func(*args, **kwargs)

        return guarded

    return decorator
154
+
155
+
156
def import_transformers_or_skip():
    """Skip (via exit code) when HuggingFace ``transformers`` is not importable."""

    def decorator(func):
        @wraps(func)
        def guarded(*args, **kwargs):
            try:
                from transformers import (  # noqa: F401
                    AutoModelForMaskedLM,
                    BertConfig,
                )

                # The call stays inside the try block, so an ImportError
                # raised lazily by the test itself also becomes a skip.
                return func(*args, **kwargs)
            except ImportError:
                sys.exit(TEST_SKIPS["importerror"].exit_code)

        return guarded

    return decorator
173
+
174
+
175
def at_least_x_gpu(x):
    """Return True iff CUDA is available with at least ``x`` devices."""
    if not torch.cuda.is_available():
        return False
    return torch.cuda.device_count() >= x
177
+
178
+
179
def skip_if_lt_x_gpu(x):
    """Skip the wrapped test (via exit code) unless at least ``x`` GPUs exist."""

    def decorator(func):
        @wraps(func)
        def guarded(*args, **kwargs):
            enough = torch.cuda.is_available() and torch.cuda.device_count() >= x
            if enough:
                return func(*args, **kwargs)
            sys.exit(TEST_SKIPS[f"multi-gpu-{x}"].exit_code)

        return guarded

    return decorator
190
+
191
+
192
# This decorator helps avoiding initializing cuda while testing other backends
def nccl_skip_if_lt_x_gpu(backend, x):
    def decorator(func):
        @wraps(func)
        def guarded(*args, **kwargs):
            if backend != "nccl":
                # Non-NCCL backends never touch the CUDA runtime here.
                return func(*args, **kwargs)
            if torch.cuda.is_available() and torch.cuda.device_count() >= x:
                return func(*args, **kwargs)
            sys.exit(TEST_SKIPS[f"multi-gpu-{x}"].exit_code)

        return guarded

    return decorator
206
+
207
+
208
def verify_ddp_error_logged(model_DDP, err_substr):
    """Assert that ``err_substr`` was recorded in the model's DDP logging data."""
    data = model_DDP._get_ddp_logging_data()
    for key in ("iteration", "has_error", "error"):
        assert key in data
    logged = data["error"]
    # Strip an appended C++ stacktrace, if present, before matching.
    marker = "\nException raised from "
    expected = err_substr.split(marker)[0] if marker in err_substr else err_substr
    assert expected in logged, (
        f"Did not find expected {expected} in ddp logging data error: {logged}"
    )
224
+
225
+
226
def with_nccl_blocking_wait(func):
    """
    Convenience decorator to set/unset TORCH_NCCL_BLOCKING_WAIT flag. Note that use of
    this decorator will override the setting of TORCH_NCCL_ASYNC_ERROR_HANDLING for
    the particular test. After the test, both TORCH_NCCL_BLOCKING_WAIT and
    TORCH_NCCL_ASYNC_ERROR_HANDLING will be restored to their original values.
    """

    @wraps(func)
    def wrapper(*args, **kwargs):
        # Save and unset TORCH_NCCL_ASYNC_ERROR_HANDLING
        try:
            cached_nccl_async_error_handling: Union[str, None] = os.environ[
                "TORCH_NCCL_ASYNC_ERROR_HANDLING"
            ]
            del os.environ["TORCH_NCCL_ASYNC_ERROR_HANDLING"]
        except KeyError:
            # TORCH_NCCL_ASYNC_ERROR_HANDLING was unset
            cached_nccl_async_error_handling = None

        # Save val of TORCH_NCCL_BLOCKING_WAIT and set it.
        try:
            cached_nccl_blocking_wait: Union[str, None] = os.environ[
                "TORCH_NCCL_BLOCKING_WAIT"
            ]
        except KeyError:
            cached_nccl_blocking_wait = None
        finally:
            # `finally` runs on both paths, so the flag always ends up "1".
            os.environ["TORCH_NCCL_BLOCKING_WAIT"] = "1"

        try:
            ret = func(*args, **kwargs)
            return ret
        finally:
            # restore old values.
            if cached_nccl_async_error_handling is not None:
                os.environ[
                    "TORCH_NCCL_ASYNC_ERROR_HANDLING"
                ] = cached_nccl_async_error_handling

            if cached_nccl_blocking_wait is not None:
                os.environ["TORCH_NCCL_BLOCKING_WAIT"] = cached_nccl_blocking_wait
            # NOTE(review): when a variable was originally unset it is NOT
            # removed here -- BLOCKING_WAIT stays "1" and ASYNC_ERROR_HANDLING
            # stays deleted. Confirm this is acceptable for test isolation.

    return wrapper
270
+
271
+
272
def with_dist_debug_levels(levels):
    """
    Runs a test for each distributed debug level specified in levels.

    For each level in ``levels``, sets TORCH_DISTRIBUTED_DEBUG, refreshes the
    c10d debug level from the environment, runs the test, and barriers so all
    ranks stay in lockstep between levels.
    """

    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            old_level = os.environ.get("TORCH_DISTRIBUTED_DEBUG", None)
            for level in levels:
                os.environ["TORCH_DISTRIBUTED_DEBUG"] = level
                c10d.set_debug_level_from_env()
                ret = func(*args, **kwargs)
                c10d.barrier()
                # Restore the pre-test level (if any) after each run.
                if old_level is not None:
                    os.environ["TORCH_DISTRIBUTED_DEBUG"] = old_level
            # Only returns test return for last test, but since these are
            # unittests the return value is not really used and earlier tests
            # would've raised had they failed.
            return ret

        return wrapper

    return decorator
296
+
297
+
298
def requires_gloo():
    """Skip (or pass-in-sandcastle) unless c10d was built with the Gloo backend."""
    return skip_but_pass_in_sandcastle_if(
        not c10d.is_gloo_available(),
        "c10d was not compiled with the Gloo backend",
    )
303
+
304
+
305
def requires_nccl_version(version, msg):
    """Skip decorator requiring at least NCCL ``version``; ``msg`` explains why.

    Skips (or passes in sandcastle) when c10d lacks the NCCL backend or the
    available NCCL is older than ``version``.
    """
    if not c10d.is_nccl_available():
        return skip_but_pass_in_sandcastle(
            "c10d was not compiled with the NCCL backend",
        )
    else:
        # Query the NCCL version once instead of twice (condition + message).
        found_version = torch.cuda.nccl.version()
        return skip_but_pass_in_sandcastle_if(
            found_version < version,
            f"Requires NCCL version greater than or equal to: {version}, found: {found_version}, reason: {msg}",
        )
315
+
316
+
317
def requires_nccl():
    """Skip (or pass-in-sandcastle) unless c10d was built with the NCCL backend."""
    return skip_but_pass_in_sandcastle_if(
        not c10d.is_nccl_available(),
        "c10d was not compiled with the NCCL backend",
    )
322
+
323
def requires_ucc():
    """Skip (or pass-in-sandcastle) unless c10d was built with the UCC backend."""
    return skip_but_pass_in_sandcastle_if(
        not c10d.is_ucc_available(),
        "c10d was not compiled with the UCC backend",
    )
328
+
329
def requires_mpi():
    """Skip (or pass-in-sandcastle) unless c10d was built with the MPI backend."""
    return skip_but_pass_in_sandcastle_if(
        not c10d.is_mpi_available(),
        "c10d was not compiled with the MPI backend",
    )
334
+
335
+
336
def skip_if_rocm_multiprocess(func):
    """Skips a test for ROCm"""
    # Mark the function so test infrastructure can detect the decoration.
    func.skip_if_rocm_multiprocess = True

    @wraps(func)
    def wrapper(*args, **kwargs):
        if not TEST_WITH_ROCM:
            return func(*args, **kwargs)
        # Exit with the sentinel code so the parent process records a skip.
        sys.exit(TEST_SKIPS["skipIfRocm"].exit_code)

    return wrapper
347
+
348
+
349
def skip_if_win32():
    """Skip (or pass-in-sandcastle) when running on Windows."""
    return skip_but_pass_in_sandcastle_if(
        sys.platform == "win32",
        "This unit test case is not supported on Windows platform",
    )
354
+
355
+
356
@retry_on_connect_failures
def create_tcp_store(
    addr="localhost",
    world_size=1,
    is_master=True,
    timeout=timedelta(minutes=5),
    wait_for_workers=True,
    jit_class=False,
    use_libuv=True,
):
    """
    Creates a TCP store. Retries if the chosen port is already in use.

    When ``jit_class`` is True, constructs the TorchScript-registered
    ``torch.classes.dist_c10d.TCPStore`` (which takes a millisecond timeout);
    otherwise constructs the regular ``c10d.TCPStore``.
    """
    port = find_free_port()
    if jit_class:
        # The JIT class takes an integer timeout in milliseconds.
        timeout_millisecond = int(timeout / timedelta(milliseconds=1))
        return torch.classes.dist_c10d.TCPStore(
            addr, port, world_size, is_master, timeout_millisecond
        )
    else:
        return c10d.TCPStore(
            addr, port, world_size, is_master, wait_for_workers=wait_for_workers, use_libuv=use_libuv
        )
379
+
380
+
381
if TEST_WITH_TSAN:
    # TSAN runs much slower.
    TIMEOUT_DEFAULT = 500
else:
    TIMEOUT_DEFAULT = int(os.getenv('DISTRIBUTED_TESTS_DEFAULT_TIMEOUT', '300'))
# Per-test timeout overrides, keyed by bare test method name (see get_timeout).
TIMEOUT_OVERRIDE = {"test_ddp_uneven_inputs": 400}


# https://github.com/pytorch/pytorch/issues/75665
if TEST_WITH_ROCM:
    TIMEOUT_OVERRIDE["test_join_kwargs"] = 200
393
+
394
def create_device(interface=None):
    """Create a Gloo device; loopback on Windows or when no interface is given."""
    if sys.platform == "win32" or interface is None:
        return c10d.ProcessGroupGloo.create_device(hostname="127.0.0.1")
    else:
        return c10d.ProcessGroupGloo.create_device(interface=interface)
399
+
400
+
401
def get_timeout(test_id) -> int:
    """Return the timeout (seconds) for ``test_id``, honoring TIMEOUT_OVERRIDE."""
    return TIMEOUT_OVERRIDE.get(test_id.split(".")[-1], TIMEOUT_DEFAULT)
403
+
404
+
405
@contextmanager
def captured_output():
    """Temporarily redirect sys.stdout/sys.stderr into StringIO buffers.

    Yields the (stdout, stderr) buffer pair; the real streams are restored on
    exit, even if the body raises.
    """
    buf_out, buf_err = StringIO(), StringIO()
    saved_streams = sys.stdout, sys.stderr
    try:
        sys.stdout, sys.stderr = buf_out, buf_err
        yield sys.stdout, sys.stderr
    finally:
        sys.stdout, sys.stderr = saved_streams
414
+
415
+
416
+ def simple_sparse_reduce_tests(rank: int, world_size: int, num_inputs: int = 1):
417
+ """
418
+ Generate a number of basic test cases for sparse reduction.
419
+ These cover tensors with a varying number of sparse dimensions and a varying
420
+ number of dense dimensions. The only reduction operation we support is sum.
421
+ """
422
+
423
+ def generate(rank: int, world_size: int, sparse_dims: int = 1, dense_dims: int = 0):
424
+ # First sparse dimension is [0..rank].
425
+ # Subsequent dimensions are always 0, so we know there is
426
+ # a non-empty intersection between any two sparse tensors.
427
+ indices = torch.reshape(torch.arange(rank + 1), (1, rank + 1))
428
+ shape = [world_size] + [2 for _ in range(dense_dims)]
429
+ for _ in range(sparse_dims - 1):
430
+ indices = torch.cat((indices, torch.zeros(1, rank + 1)))
431
+ shape.append(world_size)
432
+ values = torch.ones([rank + 1] + [2 for _ in range(dense_dims)])
433
+ return torch.sparse_coo_tensor(indices, values, shape)
434
+
435
+ def compute_sum(fn, world_size: int):
436
+ return reduce(
437
+ operator.add, [fn(rank, world_size) for rank in range(world_size)]
438
+ )
439
+
440
+ return [
441
+ (
442
+ [
443
+ fn(num_inputs * rank + i, num_inputs * world_size)
444
+ for i in range(num_inputs)
445
+ ],
446
+ [compute_sum(fn, num_inputs * world_size) for i in range(num_inputs)],
447
+ )
448
+ for fn in [
449
+ partial(generate, sparse_dims=1),
450
+ partial(generate, sparse_dims=2),
451
+ partial(generate, sparse_dims=3),
452
+ partial(generate, dense_dims=1),
453
+ partial(generate, dense_dims=2),
454
+ partial(generate, dense_dims=3),
455
+ ]
456
+ ]
457
+
458
+
459
+ # HELPER FOR MULTIGPU TESTS
460
+ def init_multigpu_helper(world_size: int, backend: str):
461
+ """Multigpu tests are designed to simulate the multi nodes with multi
462
+ GPUs on each node. Nccl backend requires equal #GPUs in each process.
463
+ On a single node, all visible GPUs are evenly
464
+ divided to subsets, each process only uses a subset.
465
+ """
466
+ nGPUs = torch.cuda.device_count()
467
+ visible_devices = range(nGPUs)
468
+
469
+ # If rank is less than or equal to number of available GPU's
470
+ # then each rank can be mapped to corresponding GPU.
471
+ nGPUs_per_process = 1
472
+ if world_size > nGPUs:
473
+ nGPUs_per_process = nGPUs // world_size
474
+ rank_to_GPU = {
475
+ i: list(visible_devices[i * nGPUs_per_process : (i + 1) * nGPUs_per_process])
476
+ for i in range(world_size)
477
+ }
478
+ return rank_to_GPU
479
+
480
+
481
# Module-level handle to the shared temp dir created by
# initialize_temp_directories() and released by cleanup_temp_dir().
tmp_dir: Optional[tempfile.TemporaryDirectory] = None
482
+
483
+
484
def initialize_temp_directories(init_method: Optional[str] = None) -> None:
    """Create the shared temp directory layout used by distributed tests.

    Publishes the base directory via the TEMP_DIR env var and sets INIT_METHOD
    to ``init_method`` when given, otherwise to a file URL inside the temp dir.
    """
    global tmp_dir
    tmp_dir = tempfile.TemporaryDirectory()
    base = tmp_dir.name
    os.environ["TEMP_DIR"] = base
    # Subdirectories used by the test harness for synchronization and scratch.
    for sub in ("barrier", "test_dir", "init_dir"):
        os.mkdir(os.path.join(base, sub))
    if init_method is not None:
        os.environ["INIT_METHOD"] = init_method
    else:
        os.environ["INIT_METHOD"] = FILE_SCHEMA + os.path.join(
            base, "init_dir", "shared_init_file"
        )
499
+
500
+
501
def cleanup_temp_dir() -> None:
    """Dispose of the temp directory created by initialize_temp_directories(), if any."""
    if tmp_dir is not None:
        tmp_dir.cleanup()
504
+
505
+
506
# Most tests operate with this worldsize
DEFAULT_WORLD_SIZE = 4

# [How does MultiProcessTestCase work?]
# Each MultiProcessTestCase instance uses 1 + `world_size()` processes, by
# default `world_size()` returns 4. Let's take `test_rpc_spawn.py` as an
# example which inherits from this class. Its `setUp()` method calls into
# `MultiProcessTestCase._spawn_processes()` which spawns `world_size()`
# subprocesses. During the spawn, the main process passes the test name to
# subprocesses, and the name is acquired from self.id(). The subprocesses
# then use the provided test function name to retrieve the function attribute
# from the test instance and run it. The main process simply waits for all
# subprocesses to join.
519
+
520
+
521
class MultiProcessTestCase(TestCase):
    # Rank used by the main (spawning) process; never a worker rank.
    MAIN_PROCESS_RANK = -1
    # This exit code is used to indicate that the test code had an error and
    # exited abnormally. There are certain tests that might use sys.exit() to
    # simulate failures and in those cases, we can't have an exit code of 0,
    # but we still want to ensure we didn't run into any other errors.
    TEST_ERROR_EXIT_CODE = 10

    # do not early terminate for distributed tests.
    def _should_stop_test_suite(self) -> bool:
        return False

    @property
    def world_size(self) -> int:
        """Number of worker subprocesses to spawn per test."""
        return DEFAULT_WORLD_SIZE

    def join_or_run(self, fn):
        """Wrap ``fn`` so the main process joins workers while workers run ``fn``."""
        @wraps(fn)
        def wrapper(self):
            if self.rank == self.MAIN_PROCESS_RANK:
                self._join_processes(fn)
            else:
                fn()

        return types.MethodType(wrapper, self)

    # The main process spawns N subprocesses that run the test.
    # Constructor patches current instance test method to
    # assume the role of the main process and join its subprocesses,
    # or run the underlying test function.
    def __init__(self, method_name: str = "runTest", methodName: str = "runTest") -> None:
        # methodName is the correct naming in unittest and testslide uses keyword arguments.
        # So we need to use both to 1) not break BC and, 2) support testslide.
        if methodName != "runTest":
            method_name = methodName
        super().__init__(method_name)
        fn = getattr(self, method_name)
        setattr(self, method_name, self.join_or_run(fn))

    def setUp(self) -> None:
        """Reset per-test state; runs in the main process (and again per worker)."""
        super().setUp()
        self.skip_return_code_checks = []  # type: ignore[var-annotated]
        self.processes = []  # type: ignore[var-annotated]
        self.rank = self.MAIN_PROCESS_RANK
        self.file_name = tempfile.NamedTemporaryFile(delete=False).name
        # pid to pipe consisting of error message from process.
        self.pid_to_pipe = {}  # type: ignore[var-annotated]

    def tearDown(self) -> None:
        """Terminate any still-running workers and drop references to them."""
        super().tearDown()
        for p in self.processes:
            p.terminate()
        # Each Process instance holds a few open file descriptors. The unittest
        # runner creates a new TestCase instance for each test method and keeps
        # it alive until the end of the entire suite. We must thus reset the
        # processes to prevent an effective file descriptor leak.
        self.processes = []

    def _current_test_name(self) -> str:
        # self.id() == e.g. '__main__.TestDistributed.TestAdditive.test_get_rank'
        return self.id().split(".")[-1]

    def _start_processes(self, proc) -> None:
        """Spawn ``world_size`` workers via factory ``proc``, wiring up error pipes."""
        self.processes = []
        for rank in range(int(self.world_size)):
            parent_conn, child_conn = torch.multiprocessing.Pipe()
            process = proc(
                target=self.__class__._run,
                name="process " + str(rank),
                args=(rank, self._current_test_name(), self.file_name, child_conn),
                kwargs={
                    "fake_pg": getattr(self, "fake_pg", False),
                }
            )
            process.start()
            logger.info("Started process %s with pid %s", rank, process.pid)
            self.pid_to_pipe[process.pid] = parent_conn
            self.processes.append(process)

    def _spawn_processes(self) -> None:
        """Spawn workers using the 'spawn' multiprocessing start method."""
        proc = torch.multiprocessing.get_context("spawn").Process
        self._start_processes(proc)

    class Event(Enum):
        # Request sent from the main process asking a worker for its traceback.
        GET_TRACEBACK = 1

    @staticmethod
    def _event_listener(parent_pipe, signal_pipe, rank: int):
        """Worker-side thread: serve traceback requests until signaled to stop."""
        logger.info("Starting event listener thread for rank %s", rank)
        while True:
            ready_pipes = multiprocessing.connection.wait([parent_pipe, signal_pipe])

            if parent_pipe in ready_pipes:

                if parent_pipe.closed:
                    logger.info(
                        "Pipe closed for process %s, stopping event listener thread", rank
                    )
                    return

                event = parent_pipe.recv()
                logger.info("Received event %s on process %s", event, rank)

                if event == MultiProcessTestCase.Event.GET_TRACEBACK:
                    # Return traceback to the parent process.
                    with tempfile.NamedTemporaryFile(mode="r+") as tmp_file:
                        faulthandler.dump_traceback(tmp_file)
                        # Flush buffers and seek to read from the beginning
                        tmp_file.flush()
                        tmp_file.seek(0)
                        parent_pipe.send(tmp_file.read())

                    logger.info("Process %s sent traceback", rank)

            if signal_pipe in ready_pipes:
                return

    @classmethod
    def _run(cls, rank: int, test_name: str, file_name: str, parent_pipe, **kwargs) -> None:
        """Worker entry point: build a fresh TestCase instance and run the test."""
        self = cls(test_name)
        self.rank = rank
        self.file_name = file_name
        self.run_test(test_name, parent_pipe)

    def run_test(self, test_name: str, parent_pipe) -> None:
        """Run ``test_name`` in this worker, mapping outcomes to exit codes."""
        # Start event listener thread.
        signal_recv_pipe, signal_send_pipe = torch.multiprocessing.Pipe(duplex=False)
        event_listener_thread = threading.Thread(
            target=MultiProcessTestCase._event_listener,
            args=(parent_pipe, signal_recv_pipe, self.rank),
            daemon=True,
        )
        event_listener_thread.start()
        if sys.platform != "win32" and sys.platform != "darwin":
            # Register signal handler to dump stack traces on FATALs.
            # Windows and MacOS do not support the signal handlers.
            torch._C._set_print_stack_traces_on_fatal_signal(True)
        # Show full C++ stacktraces when a Python error originating from C++ is raised.
        os.environ["TORCH_SHOW_CPP_STACKTRACES"] = "1"

        # self.id() == e.g. '__main__.TestDistributed.test_get_rank'
        # We're retrieving a corresponding test and executing it.
        try:
            getattr(self, test_name)()
        except unittest.SkipTest as se:
            logger.info(
                "Process %s skipping test %s for following reason: %s", self.rank, test_name, str(se)
            )
            sys.exit(TEST_SKIPS["generic"].exit_code)
        except Exception as e:
            logger.error(
                "Caught exception: \n%s exiting "
                "process %s with exit code: %s",
                traceback.format_exc(), self.rank, MultiProcessTestCase.TEST_ERROR_EXIT_CODE
            )
            # Send error to parent process.
            parent_pipe.send(traceback.format_exc())
            sys.exit(MultiProcessTestCase.TEST_ERROR_EXIT_CODE)
        finally:
            if signal_send_pipe is not None:
                # Tell the event listener thread to shut down.
                signal_send_pipe.send(None)

            assert event_listener_thread is not None
            event_listener_thread.join()
            # Close pipe after done with test.
            parent_pipe.close()

    def _get_timedout_process_traceback(self) -> None:
        """Ask each still-running worker for its traceback and log the results."""
        pipes = []
        for i, process in enumerate(self.processes):
            if process.exitcode is None:
                pipe = self.pid_to_pipe[process.pid]
                try:
                    pipe.send(MultiProcessTestCase.Event.GET_TRACEBACK)
                    pipes.append((i, pipe))
                except ConnectionError as e:
                    logger.error(
                        "Encountered error while trying to get traceback for process %s: %s", i, e
                    )

        # Wait for results.
        for rank, pipe in pipes:
            try:
                # Wait for traceback
                if pipe.poll(5):
                    if pipe.closed:
                        logger.info(
                            "Pipe closed for process %s, cannot retrieve traceback", rank
                        )
                        continue

                    # NOTE: local name shadows the `traceback` module here.
                    traceback = pipe.recv()
                    logger.error(
                        "Process %s timed out with traceback: \n\n%s", rank, traceback
                    )
                else:
                    logger.error(
                        "Could not retrieve traceback for timed out process: %s", rank
                    )
            except ConnectionError as e:
                logger.error(
                    "Encountered error while trying to get traceback for process %s: %s", rank, e
                )

    def _join_processes(self, fn) -> None:
        """Main-process loop: poll workers until completion, error, or timeout."""
        timeout = get_timeout(self.id())
        start_time = time.time()
        subprocess_error = False
        try:
            while True:
                # check to see if any subprocess exited with an error early.
                for (i, p) in enumerate(self.processes):
                    # This is the exit code processes exit with if they
                    # encountered an exception.
                    if p.exitcode == MultiProcessTestCase.TEST_ERROR_EXIT_CODE:
                        print(
                            f"Process {i} terminated with exit code {p.exitcode}, terminating remaining processes."
                        )
                        active_children = torch.multiprocessing.active_children()
                        for ac in active_children:
                            ac.terminate()
                        subprocess_error = True
                        break
                if subprocess_error:
                    break
                # All processes have joined cleanly if they all have a valid exitcode
                if all(p.exitcode is not None for p in self.processes):
                    break
                # Check if we should time out the test. If so, we terminate each process.
                elapsed = time.time() - start_time
                if elapsed > timeout:
                    self._get_timedout_process_traceback()
                    print(
                        f"Timing out after {timeout} seconds and killing subprocesses."
                    )
                    for p in self.processes:
                        p.terminate()
                    break
                # Sleep to avoid excessive busy polling.
                time.sleep(0.1)

            elapsed_time = time.time() - start_time

            if fn in self.skip_return_code_checks:
                self._check_no_test_errors(elapsed_time)
            else:
                self._check_return_codes(elapsed_time)
        finally:
            # Close all pipes
            for pipe in self.pid_to_pipe.values():
                pipe.close()

    def _check_no_test_errors(self, elapsed_time) -> None:
        """
        Checks that we didn't have any errors thrown in the child processes.
        """
        for i, p in enumerate(self.processes):
            if p.exitcode is None:
                raise RuntimeError(
                    f"Process {i} timed out after {elapsed_time} seconds"
                )
            self.assertNotEqual(self.TEST_ERROR_EXIT_CODE, p.exitcode)

    def _check_return_codes(self, elapsed_time) -> None:
        """
        Checks that the return codes of all spawned processes match, and skips
        tests if they returned a return code indicating a skipping condition.
        """
        # If no processes are spawned, there is nothing to check.
        if not self.processes:
            logger.warning("Note: no subprocesses were spawned, test was likely skipped.")
            return

        first_process = self.processes[0]
        # first, we check if there are errors in actual processes
        # (via TEST_ERROR_EXIT CODE), and raise an exception for those.
        # the reason we do this is to attempt to raise a more helpful error
        # message than "Process x terminated/timed out"
        # TODO: we should pipe the exception of the failed subprocess here.
        # Currently, the actual exception is displayed as a logging output.
        errored_processes = [
            (i, p)
            for i, p in enumerate(self.processes)
            if p.exitcode == MultiProcessTestCase.TEST_ERROR_EXIT_CODE
        ]
        if errored_processes:
            error = ""
            for i, process in errored_processes:
                # Get error from pipe.
                error_message = self.pid_to_pipe[process.pid].recv()
                error += (
                    f"Process {i} exited with error code {MultiProcessTestCase.TEST_ERROR_EXIT_CODE} "
                    f"and exception:\n{error_message}\n"
                )

            raise RuntimeError(error)
        # If no process exited uncleanly, we check for timeouts, and then ensure
        # each process exited cleanly.
        for i, p in enumerate(self.processes):
            if p.exitcode is None:
                raise RuntimeError(
                    f"Process {i} terminated or timed out after {elapsed_time} seconds"
                )
            self.assertEqual(
                p.exitcode,
                first_process.exitcode,
                msg=f"Expect process {i} exit code to match Process 0 exit code of {first_process.exitcode}, but got {p.exitcode}",
            )
        for skip in TEST_SKIPS.values():
            if first_process.exitcode == skip.exit_code:
                if IS_SANDCASTLE:
                    # Don't use unittest.skip to skip the test on sandcastle
                    # since it creates tasks for skipped tests assuming there
                    # is some follow-up needed. Instead just "pass" the test
                    # with an appropriate message.
                    logger.info(
                        "Skipping %s on sandcastle for the following reason: %s", self.id(), skip.message
                    )
                    return
                else:
                    raise unittest.SkipTest(skip.message)
        self.assertEqual(
            first_process.exitcode,
            0,
            msg=f"Expected zero exit code but got {first_process.exitcode} for pid: {first_process.pid}",
        )

    @property
    def is_master(self) -> bool:
        """True only on rank 0."""
        return self.rank == 0
851
+
852
+
853
def run_subtests(
    cls_inst,
    subtest_config: Dict[str, List[Any]],
    test_fn: Callable,
    *test_args,
    **test_kwargs: Any,
):
    """
    Runs a test function given by ``test_fn`` as a subtest according to the
    configurations specified by ``subtest_config``. This amortizes the
    costly setup overhead (including process spawn and initializing the
    process group) over the subtests.

    Args:
        subtest_config (Dict[str, List[Any]]): A mapping from subtest
            keyword argument name to a list of its possible values.
        test_fn (Callable): A callable that runs the actual test.
        test_args: Positional arguments to pass to ``test_fn``.
        test_kwargs: Keyword arguments to pass to ``test_fn``.
    """
    # Convert the config mapping to a list to have a fixed order
    subtest_config_items: List[Tuple[str, List[Any]]] = list(subtest_config.items())
    subtest_config_keys: List[str] = [item[0] for item in subtest_config_items]
    subtest_config_values: List[List[Any]] = [item[1] for item in subtest_config_items]
    for values in itertools.product(*subtest_config_values):
        # Map keyword to chosen value
        subtest_kwargs = dict(zip(subtest_config_keys, values))
        with cls_inst.subTest(**subtest_kwargs):
            # Reset dynamo state around each subtest so compiled state from
            # one configuration cannot leak into the next.
            torch._dynamo.reset()
            test_fn(*test_args, **test_kwargs, **subtest_kwargs)
            torch._dynamo.reset()
    # Keep ranks in lockstep before returning to the caller.
    c10d.barrier()
885
+
886
+
887
+ # Cannot use functools.cache as it requires python 3.9
888
+ EFA_PROBE_RESULT = None
889
+
890
+
891
+ def has_efa() -> bool:
892
+ """
893
+ If shell command `fi_info -p efa -t FI_EP_RDM` returns exit code 0 then we assume that the machine has
894
+ Libfabric EFA interfaces and EFA software components installed,
895
+ see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/efa-start.html.
896
+ """
897
+ global EFA_PROBE_RESULT
898
+ if EFA_PROBE_RESULT is not None:
899
+ return EFA_PROBE_RESULT
900
+
901
+ try:
902
+ EFA_PROBE_RESULT = (
903
+ subprocess.run(["fi_info", "-p", "efa", "-t", "FI_EP_RDM"], check=False).returncode == 0
904
+ )
905
+ except FileNotFoundError:
906
+ EFA_PROBE_RESULT = False
907
+ return EFA_PROBE_RESULT
908
+
909
+
910
def tp_transports():
    """
    If the machine has Libfabric EFA interfaces and EFA software components installed it may cause
    'RuntimeError: In operator() at tensorpipe/common/ibv.h:172 "": Operation not supported' if tensorpipe
    uses InfiniBand transport, so we exclude it from tensorpipe transports,
    see https://github.com/pytorch/pytorch/issues/73885 and https://github.com/pytorch/pytorch/issues/65022
    """
    # None means "use tensorpipe's default transports".
    return ["shm", "uv"] if has_efa() else None
918
+
919
+
920
def spawn_threads_and_init_comms(
    func=None, timeout=TIMEOUT_DEFAULT, world_size=DEFAULT_WORLD_SIZE
):
    """
    Wrapper to use with a test method: runs it on ``world_size`` threads, each
    joined to an in-process "threaded" process group.
    """
    if func is None:
        # Called with arguments (e.g. @spawn_threads_and_init_comms(world_size=2)):
        # return a decorator bound to those arguments.
        return partial(
            spawn_threads_and_init_comms, timeout=timeout, world_size=world_size
        )


    def _run_test_method_with_multi_threads(world_size, callback):
        # Install a fresh threaded process-group world shared by all ranks.
        world = _install_threaded_pg()
        global_store = c10d.HashStore()

        def world_is_valid():
            return world == c10d.distributed_c10d._world

        def worker(rank, world_pg, store):
            c10d.init_process_group(
                backend="threaded", rank=rank, world_size=world_size, store=store
            )
            try:
                callback()
            except BaseException as ex:
                # Exceptions are handled in MultiThreadedTestCase
                MultiThreadedTestCase.exception_queue.put((rank, sys.exc_info()))
                ProcessLocalGroup.exception_handle(ex)  # trigger _terminate event and awaken worker threads
            finally:
                if world_is_valid():
                    c10d.destroy_process_group()

        threads = []
        for rank in range(world_size):
            t = threading.Thread(target=worker, args=(rank, world, global_store))
            t.start()
            threads.append(t)

        return threads


    @wraps(func)
    def wrapper(self, *args, **kwargs):
        # TODO: get test name from kwargs
        torch._C._distributed_c10d._set_thread_isolation_mode(True)
        try:
            threads = _run_test_method_with_multi_threads(world_size, lambda: func(self, *args, **kwargs))
            # join and error handling
            MultiThreadedTestCase._join_threads(threads, func)
        finally:
            torch._C._distributed_c10d._set_thread_isolation_mode(False)

    return wrapper
974
+
975
+
976
+ class MultiThreadedTestCase(TestCase):
977
+ """
978
+ Test runner that runs all tests with the in-proc process group using
979
+ multiple threads with the threaded process group.
980
+
981
+ Each test spawns world_size threads and run the test method in each thread.
982
+
983
+ Difference from regular MultiProcess test runner:
984
+ Must explicitly defines SetUp and call self._spawn_threads() to run the tests.
985
+ Cannot use setUp / tearDown (must use perThreadSetup / perThreadShutdown)
986
+ to set up / tear down each thread when running each test.
987
+ No global state possible
988
+ How bad of a limitation is this?
989
+ """
990
+ exception_queue = queue.Queue()
991
+
992
+ MAIN_THREAD_RANK = -1
993
+
994
+ def join_or_run(self, fn):
995
+ @wraps(fn)
996
+ def wrapper(self):
997
+ if self.rank == self.MAIN_THREAD_RANK:
998
+ self._join_threads(self.threads, fn)
999
+ else:
1000
+ fn()
1001
+
1002
+ return types.MethodType(wrapper, self)
1003
+
1004
+ def __init__(self, method_name: str = "runTest", methodName: str = "runTest") -> None:
1005
+ # methodName is the correct naming in unittest and testslide uses keyword arguments.
1006
+ # So we need to use both to 1) not break BC and, 2) support testslide.
1007
+ if methodName != "runTest":
1008
+ method_name = methodName
1009
+ super().__init__(method_name)
1010
+ fn = getattr(self, method_name)
1011
+ setattr(self, method_name, self.join_or_run(fn))
1012
+
1013
+ def perThreadSetUp(self):
1014
+ # super().setUp() # TestCase.setUp() calls torch.manual_seed()
1015
+ pass
1016
+
1017
+ def perThreadTearDown(self):
1018
+ pass
1019
+
1020
+ def setUp(self) -> None:
1021
+ """
1022
+ setUp only set up things in the main thread, if you want to configure things
1023
+ in the spawned threads, use perThreadSetUp
1024
+ """
1025
+ super().setUp()
1026
+ self.rank = self.MAIN_THREAD_RANK
1027
+ self.threads = []
1028
+ # Show full C++ stacktraces when a Python error originating from C++ is raised.
1029
+ os.environ["TORCH_SHOW_CPP_STACKTRACES"] = "1"
1030
+
1031
+ def tearDown(self):
1032
+ """
1033
+ tearDown only set up things in the main thread, if you want to configure things
1034
+ in the spawned threads, use perThreadTearDown
1035
+ """
1036
+ super().tearDown()
1037
+ self.threads = []
1038
+
1039
+ def _spawn_threads(self):
1040
+ """
1041
+ class method to spawn threads and run test, use this method in the SetUp of your TestCase
1042
+ """
1043
+ torch._C._distributed_c10d._set_thread_isolation_mode(True)
1044
+ test_name = self._current_test_name
1045
+ # for each test case, we need to create thread local world, and a global store
1046
+ world = _install_threaded_pg()
1047
+ self.__class__.global_store = c10d.HashStore()
1048
+
1049
+ def world_is_valid():
1050
+ return world == c10d.distributed_c10d._world
1051
+
1052
+ if not world_is_valid():
1053
+ raise RuntimeError("Invalid world")
1054
+
1055
+ for rank in range(self.world_size):
1056
+ t = threading.Thread(target=self.__class__._run, args=(test_name, rank, self.world_size))
1057
+ t.start()
1058
+ self.threads.append(t)
1059
+
1060
+ @classmethod
1061
+ def _run(cls, test_name, rank, world_size, **kwargs):
1062
+ self = cls(test_name)
1063
+ self.rank = rank
1064
+
1065
+ # precision/rel_tol is a thread-local setting since it may be overridden per test, need to make
1066
+ # every thread have the same value. This would be relevant when we use op db tests, where it
1067
+ # needs those states to be set i.e. using instantiate_device_type_tests()
1068
+ # TODO: figure out a better way to do this
1069
+ if hasattr(self, "_tls"):
1070
+ self._tls = threading.local()
1071
+ self._tls.precision = TestCase._precision
1072
+ self._tls.rel_tol = TestCase._rel_tol
1073
+
1074
+ self.run_test_with_threaded_pg(test_name, rank, world_size)
1075
+
1076
+ def run_test_with_threaded_pg(self, test_name, rank, world_size):
1077
+ """
1078
+ Run the current test associated with `test_name` using the threaded process group.
1079
+ """
1080
+ c10d.init_process_group(
1081
+ backend="threaded", rank=rank, world_size=world_size, store=self.__class__.global_store
1082
+ )
1083
+ self.perThreadSetUp()
1084
+
1085
+ try:
1086
+ getattr(self, test_name)()
1087
+ except BaseException as ex:
1088
+ self.exception_queue.put((rank, sys.exc_info()))
1089
+ ProcessLocalGroup.exception_handle(ex) # trigger _terminate event and awaken worker threads
1090
+ finally:
1091
+ c10d.destroy_process_group()
1092
+ self.perThreadTearDown()
1093
+
1094
+
1095
+ @classmethod
1096
+ def _join_threads(cls, threads, fn):
1097
+ timeout = TIMEOUT_DEFAULT
1098
+ try:
1099
+ for idx, thread in enumerate(threads):
1100
+ thread.join(max(0, timeout))
1101
+ if thread.is_alive():
1102
+ MultiThreadedTestCase.exception_queue.put(
1103
+ (
1104
+ idx,
1105
+ (
1106
+ TimeoutError,
1107
+ TimeoutError(
1108
+ f"Rank failed to join in under {timeout} seconds"
1109
+ ),
1110
+ None,
1111
+ ),
1112
+ )
1113
+ )
1114
+ ProcessLocalGroup.reset()
1115
+ failed_ranks = []
1116
+ while not cls.exception_queue.empty():
1117
+ failure = cls.exception_queue.get()
1118
+ failed_ranks.append(failure)
1119
+ finally:
1120
+ _uninstall_threaded_pg()
1121
+ torch._C._distributed_c10d._set_thread_isolation_mode(False)
1122
+
1123
+ cls._check_return_codes(failed_ranks, timeout, fn)
1124
+
1125
    @classmethod
    def _check_return_codes(cls, failed_ranks, timeout, fn):
        """Translate per-thread exceptions into a single test outcome.

        Args:
            failed_ranks: list of ``(rank, exc_info)`` pairs drained from the
                exception queue.
            timeout: the join timeout used, for the timeout error message.
            fn: the test name, used only for logging.
        """
        # Print based on exceptions raised from threads
        # SkipTest: print info for each thread
        # TimeoutError: raise RuntimeError for any timed out thread
        # Normal Exception: print error for each thread that raises exception
        # and raise a RuntimeError
        error_msg = ""
        skip_code = -1
        for rank, exc_info in failed_ranks:
            exc = exc_info[1]
            if isinstance(exc, unittest.SkipTest):
                logger.info(
                    "Thread %s skipping test %s for following reason: %s", rank, fn, str(exc)
                )
                # Only the first skip reason wins (skip_code stays set).
                if skip_code < 0:
                    skip_code = TEST_SKIPS["generic"].exit_code
            elif isinstance(exc, TimeoutError):
                # A timeout aborts reporting immediately.
                msg = f"Thread {rank} terminated or timed out after {timeout} seconds\n"
                logger.error(msg)
                raise RuntimeError(msg)
            elif isinstance(exc, Exception):
                # Accumulate formatted tracebacks so all failing ranks are shown.
                msg = "".join(traceback.format_exception(*exc_info))
                logger.error(
                    "Caught exception: \n%s exiting thread %s", msg, rank
                )
                error_msg += (
                    f"Thread {rank} exited with exception:\n{msg}\n"
                )
            elif isinstance(exc, SystemExit):
                # A SystemExit with an int code is treated as a skip request.
                if type(exc.code) == int and skip_code < 0:
                    skip_code = exc.code

        # check exceptions
        if len(error_msg) > 0:
            raise RuntimeError(error_msg)
        # check skip
        if skip_code > 0:
            for skip in TEST_SKIPS.values():
                if skip_code == skip.exit_code:
                    if IS_SANDCASTLE:
                        # "pass" the test with an appropriate message.
                        logger.info(
                            "Skipping %s on sandcastle for the following reason: %s", fn, skip.message
                        )
                        return
                    else:
                        raise unittest.SkipTest(skip.message)
1173
+
1174
    @property
    def world_size(self) -> int:
        """Number of ranks (threads) for this test; subclasses may override."""
        return DEFAULT_WORLD_SIZE
1177
+
1178
+ @property
1179
+ def _current_test_name(self) -> str:
1180
+ # self.id() == e.g. '__main__.TestDistributed.TestAdditive.test_get_rank'
1181
+ return self.id().split(".")[-1]
1182
+
1183
+ def assertEqualOnRank(self, x, y, msg=None, *, rank=0):
1184
+ """
1185
+ The reason why we have this util function instead of
1186
+ self.assertEqual is all threads are sharing one CPU RNG
1187
+ so the assertion result is only reliable on rank 0
1188
+ """
1189
+ if self.rank == rank:
1190
+ self.assertEqual(x, y, msg)
1191
+
1192
+ def assertNotEqualOnRank(self, x, y, msg=None, *, rank=0):
1193
+ if self.rank == rank:
1194
+ self.assertNotEqual(x, y)
1195
+
1196
+
1197
class SaveForwardInputsModule(nn.Module):
    """A single linear layer that records every forward input.

    Each call stores the raw (uncast) input tensor into the shared
    ``forward_inputs`` dict keyed by this module instance, then applies the
    linear layer, optionally casting the input to the layer's parameter
    dtype first.
    """

    def __init__(
        self,
        forward_inputs: Dict[nn.Module, torch.Tensor],
        cast_forward_inputs: bool,
    ) -> None:
        super().__init__()
        self.l = nn.Linear(100, 100)
        self.forward_inputs = forward_inputs
        self.cast_forward_inputs = cast_forward_inputs

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Record the input before any casting so tests see the original dtype.
        self.forward_inputs[self] = x
        if self.cast_forward_inputs:
            x = x.to(self.l.weight.dtype)
        return self.l(x)
1211
+
1212
+
1213
class SaveForwardInputsModel(nn.Module):
    """Two ``SaveForwardInputsModule``s applied in sequence.

    Records its own forward input in the shared ``forward_inputs`` dict as
    well; the two child modules record theirs independently.
    """

    def __init__(
        self,
        forward_inputs: Dict[nn.Module, torch.Tensor],
        cast_forward_inputs: bool,
    ) -> None:
        super().__init__()
        self.c1 = SaveForwardInputsModule(forward_inputs, cast_forward_inputs)
        self.c2 = SaveForwardInputsModule(forward_inputs, cast_forward_inputs)
        self.forward_inputs = forward_inputs

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        self.forward_inputs[self] = x
        hidden = self.c1(x)
        return self.c2(hidden)
1227
+
1228
@contextmanager
def _dynamo_dist_per_rank_init(rank, world_size, init_pg=True, fake_pg=False):
    """Context manager: per-rank process-group and dynamo-state lifecycle.

    Sets the CUDA device and rendezvous env vars, optionally initializes a
    real NCCL (or fake) process group, resets dynamo state on entry and exit,
    and destroys the process group on exit.
    """
    # To avoid multiple inheritance from _dynamo.test_case.TestCase and MultiProcessTestCase,
    # Just manually implement the most important part of the dynamo behavior to reset/clear.
    if not fake_pg:
        torch.cuda.set_device(rank)
    os.environ['MASTER_ADDR'] = 'localhost'
    os.environ['MASTER_PORT'] = '6789'
    if init_pg:
        if fake_pg:
            # Fake backend: no real communication, just bookkeeping.
            store = torch.testing._internal.distributed.fake_pg.FakeStore()
            c10d.init_process_group(
                backend="fake",
                world_size=world_size,
                rank=rank,
                store=store,
            )
        else:
            c10d.init_process_group("nccl", rank=rank, world_size=world_size)
    # Clear compiled caches/counters so each test starts from a clean slate.
    torch._dynamo.reset()
    torch._dynamo.utils.counters.clear()
    try:
        yield
    finally:
        torch._dynamo.reset()
        torch._dynamo.utils.counters.clear()
        if init_pg:
            c10d.destroy_process_group()
1256
+
1257
+
1258
class DynamoDistributedSingleProcTestCase(torch._dynamo.test_case.TestCase):
    """
    Test harness for single-process dynamo distributed tests,
    initializes dist process group.

    Prefer this for simple tests, as it's easier to debug.
    """

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # _exit_stack is set up in TestCase
        cls._exit_stack.enter_context(
            patch.dict(
                os.environ,
                {
                    "MASTER_ADDR": "localhost",
                    "MASTER_PORT": "12355",
                },
            )
        )
        cls.rank = 0
        cls.device = f"cuda:{cls.rank}"
        # NOTE(review): `cls.device` always contains "cuda" here, so
        # `device_ids` is always None — confirm the else-branch is intended.
        cls.device_ids = None if "cuda" in cls.device else [cls.rank]
        # Single-rank NCCL group for the whole class.
        c10d.init_process_group("nccl", rank=cls.rank, world_size=1)

    @classmethod
    def tearDownClass(cls):
        c10d.destroy_process_group()
        super().tearDownClass()
1288
+
1289
+
1290
class DynamoDistributedMultiProcTestCase(MultiProcessTestCase):
    """
    Use this for tests that actually run on multiple GPUs.

    Decorate tests with @skip_if_lt_x_gpu(ngpu)

    Note: MultiProcTestCase spawns processes per test and is slow.
    Prefer MultiThreadedTestCase for most tests. Perhaps use this one
    sparingly for integration tests.
    """

    def setUp(self):
        super().setUp()
        self._spawn_processes()

    def tearDown(self):
        super().tearDown()
        # Remove the per-test rendezvous file; it may already be gone.
        try:
            os.remove(self.file_name)
        except OSError:
            pass

    @property
    def world_size(self) -> int:
        # One process per visible GPU.
        return torch.cuda.device_count()

    @classmethod
    def _run(cls, rank: int, test_name: str, file_name: str, parent_pipe, **kwargs) -> None:
        # The rest is copypasta from MultiProcessTestCase._run
        self = cls(test_name)
        self.rank = rank
        self.file_name = file_name
        self.run_test(test_name, parent_pipe)
1322
+
1323
+
1324
class MultiProcContinousTest(TestCase):
    """Multi-process test case whose process group persists across all tests.

    Unlike ``MultiProcessTestCase`` (one spawn per test), each rank calls
    ``run_rank`` once; the process group is created in ``setUpClass`` and
    destroyed in ``tearDownClass``, amortizing init cost over the class.
    """

    # Class variables:
    # number of test processes
    world_size: int = 2
    # rank of the current process
    rank: int = -1  # unset state
    # Rendezvous file
    rdvz_file: Optional[str] = None

    @classmethod
    @abc.abstractmethod
    def backend_str(cls) -> str:
        """
        ProcessGroup backend str.
        To be customized by sub test classes, e.g. "nccl".
        Here we raise error.
        """
        raise NotImplementedError("Please implement backend_str in your test class")

    @classmethod
    def opts(cls, high_priority_stream=False):
        """
        ProcessGroup init options.
        To be customized by sub test classes, e.g. ProcessGroupNCCLOpTest
        Here we return None.
        """
        return None

    @classmethod
    def setUpClass(cls):
        """
        Class-scope test fixture. Run once for entire test class, before any test starts.
        Set up the process group.
        """
        super().setUpClass()
        if not 0 <= cls.rank < cls.world_size:
            raise RuntimeError(
                "Rank must be set and in the range of 0 to world_size. "
                f"World size: {cls.world_size} Rank: {cls.rank}"
            )
        if cls.rdvz_file:
            store = c10d.FileStore(cls.rdvz_file, cls.world_size)
        else:
            # torchrun takes care of rendezvous
            store = None
        opts = cls.opts()
        backend = cls.backend_str()
        print(f"Testing {backend=}")
        # create nccl processgroup with opts
        c10d.init_process_group(
            backend=backend,
            world_size=cls.world_size,
            rank=cls.rank,
            store=store,
            pg_options=opts,
        )
        cls.pg = c10d.distributed_c10d._get_default_group()
        print(f"Rank {cls.rank} setup complete")

    @classmethod
    def tearDownClass(cls):
        """
        Class-scope test fixture. Run once for entire test class, after all tests finish.
        Tear down the process group.
        """
        c10d.destroy_process_group()
        super().tearDownClass()
        # Clear up the rendezvous file
        if cls.rdvz_file:
            try:
                os.remove(cls.rdvz_file)
            except OSError:
                pass
        print(f"Rank {cls.rank} teardown complete")

    @classmethod
    def run_rank(
        cls,
        rank: int,
        world_size: int,
        rdvz_file: Optional[str] = None,
    ):
        """
        This is an entry point for each rank to run the tests in `MultiProcContinousTest`.
        In this entry point, we set the class variables for the test class.
        Then we run all tests.

        Note:
            - This helper only works for a subclass of `MultiProcContinousTest`.

        Example:
            - See `test_c10d_ops_nccl.py`.
        """
        # set class variables for the test class
        cls.rank = rank
        cls.world_size = world_size
        cls.rdvz_file = rdvz_file
        # Launch tests via `common_utils` infra
        run_tests()
infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/common_fsdp.py ADDED
@@ -0,0 +1,1532 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+ # Owner(s): ["oncall: distributed"]
3
+
4
+ import contextlib
5
+ import os
6
+ import re
7
+ import sys
8
+ import warnings
9
+ from abc import ABC, abstractmethod
10
+ from contextlib import nullcontext
11
+ from copy import deepcopy
12
+ from enum import auto, Enum
13
+ from functools import wraps
14
+ from typing import (
15
+ Any,
16
+ Callable,
17
+ Dict,
18
+ List,
19
+ no_type_check,
20
+ Optional,
21
+ Tuple,
22
+ Type,
23
+ Union,
24
+ )
25
+ from unittest import mock
26
+
27
+ import torch
28
+ import torch.distributed as dist
29
+ import torch.nn as nn
30
+ import torch.nn.functional as F
31
+ from torch.distributed._composable import checkpoint
32
+ from torch.distributed._composable.fsdp import fully_shard
33
+ from torch.distributed._composable.fsdp._fsdp_param_group import (
34
+ FSDPParamGroup,
35
+ RegisterPostBackwardFunction,
36
+ )
37
+ from torch.distributed.device_mesh import DeviceMesh
38
+ from torch.distributed.fsdp import CPUOffload, FullyShardedDataParallel as FSDP
39
+ from torch.distributed.fsdp._common_utils import TrainingState
40
+ from torch.distributed.fsdp._init_utils import NO_RESHARD_AFTER_FORWARD_STRATEGIES
41
+ from torch.distributed.fsdp.fully_sharded_data_parallel import (
42
+ BackwardPrefetch,
43
+ MixedPrecision,
44
+ ShardingStrategy,
45
+ )
46
+ from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
47
+ from torch.distributed.fsdp.wrap import always_wrap_policy, ModuleWrapPolicy, wrap
48
+ from torch.distributed.tensor import distribute_tensor, DTensor, Shard
49
+ from torch.distributed.tensor.parallel import (
50
+ ColwiseParallel,
51
+ parallelize_module,
52
+ RowwiseParallel,
53
+ SequenceParallel,
54
+ )
55
+ from torch.nn import TransformerDecoderLayer, TransformerEncoderLayer
56
+ from torch.nn.parallel.distributed import DistributedDataParallel as DDP
57
+ from torch.testing._internal.common_distributed import (
58
+ MultiProcessTestCase,
59
+ MultiThreadedTestCase,
60
+ run_subtests,
61
+ TEST_SKIPS,
62
+ )
63
+ from torch.testing._internal.common_utils import FILE_SCHEMA, get_cycles_per_ms
64
+ from torch.utils._triton import has_triton
65
+
66
+
67
class FSDPInitMode(Enum):
    """How (or whether) a test model is wrapped with FSDP at construction."""

    # No FSDP wrapping
    NO_FSDP = auto()
    # FSDP recursive wrapping
    RECURSIVE = auto()
    # TODO: FSDP non-recursive wrapping
    # NONRECURSIVE = auto()
74
+
75
+
76
class CUDAInitMode(Enum):
    """When a test model is moved to CUDA relative to FSDP construction."""

    # Move model to CUDA before passing to the FSDP constructor
    CUDA_BEFORE = auto()
    # Move model to CUDA after passing to the FSDP constructor
    CUDA_AFTER = auto()
    # Keep on CPU
    CUDA_NEVER = auto()
83
+
84
+
85
class FSDPTestModel(nn.Module, ABC):
    """This defines the interface expected from all models used commonly for
    FSDP unit tests."""

    @abstractmethod
    def get_input(self, device) -> Tuple[torch.Tensor, ...]:
        """Returns an input for the model as a tuple."""
        ...

    @abstractmethod
    def get_loss(self, input, output) -> torch.Tensor:
        """Returns the loss given the input and output."""
        ...

    @abstractmethod
    def run_backward(self, loss) -> None:
        """Runs the backward pass (e.g. including ``loss.backward()``)."""
        ...

    @staticmethod
    @abstractmethod
    def init(*args: Any, **kwargs: Any) -> nn.Module:
        """Initializes an instance of this model."""
        ...
109
+
110
+
111
def _assert_module_states(
    model: nn.Module,
    process_group: dist.ProcessGroup,
    assert_fn: Callable,
):
    """
    All-gathers module states across ranks and calls ``assert_fn`` on each pair
    of corresponding states from rank 0 and a nonzero rank. For example, if
    ``assert_fn`` is ``self.assertEqual()``, then this checks that all module
    states are equal across ranks.
    """
    # Include names for debugging convenience
    named_module_states = [
        (param_name, param.detach().cpu())
        for param_name, param in model.named_parameters()
    ]
    named_module_states += [
        (buffer_name, buffer.detach().cpu())
        for buffer_name, buffer in model.named_buffers()
    ]
    world_size = dist.get_world_size(process_group)
    olist = [None for _ in range(world_size)]
    # Collective: every rank contributes its (name, tensor) list.
    dist.all_gather_object(olist, named_module_states, group=process_group)
    rank0_states = olist[0]
    assert rank0_states is not None  # mypy
    # Compare rank 0's states pairwise against every other rank's.
    for state in olist[1:]:
        assert state is not None  # mypy
        for (_, p1), (_, p2) in zip(rank0_states, state):
            assert_fn(p1, p2)
140
+
141
+
142
+ def _zero_model(
143
+ model: nn.Module,
144
+ zero_buffers: bool = False,
145
+ summon_full=True,
146
+ ):
147
+ """Zeros the parameters and optionally buffers of ``model`` in place."""
148
+ ctx = FSDP.summon_full_params(model) if summon_full else nullcontext()
149
+ with ctx:
150
+ for param in model.parameters():
151
+ with torch.no_grad():
152
+ param.zero_()
153
+ if zero_buffers:
154
+ for buffer in model.buffers():
155
+ with torch.no_grad():
156
+ buffer.zero_()
157
+
158
+
159
+ def _get_state_dict(model, cpu_offload=False, half=False):
160
+ if not cpu_offload:
161
+ model = model.cuda()
162
+ if half:
163
+ model.half()
164
+
165
+ return model.state_dict()
166
+
167
+
168
def subtest_name(test_name_mapping, *args):
    """Build a subtest name by joining one mapped token per arg with '_'.

    ``None`` args become the literal "none"; every other arg is looked up
    in ``test_name_mapping`` by its ``str()`` form.
    """
    tokens = []
    for arg in args:
        tokens.append("none" if arg is None else test_name_mapping[str(arg)])
    return "_".join(tokens)
172
+
173
+
174
def _broadcast_state_dict(rank, state_dict):
    """Broadcast rank 0's ``state_dict`` to all ranks; return it on CUDA.

    Only rank 0's input contributes; other ranks' ``state_dict`` arguments
    are discarded and replaced by the broadcast result.
    """
    # For non-FSDP roots, some parts of the model state on rank 0 may
    # not be on CPU, so we move everything to CPU to avoid issues like:
    # https://github.com/pytorch/pytorch/issues/77113.
    for param_name, param in state_dict.items():
        if param.device != torch.device("cpu"):
            state_dict[param_name] = param.cpu()

    olist = [state_dict if rank == 0 else None]
    dist.broadcast_object_list(olist)
    state_dict = olist[0]
    # Ensure that the state is on CUDA
    for param_name in state_dict.keys():
        state_dict[param_name] = state_dict[param_name].cuda()
    return state_dict
189
+
190
+
191
def get_full_params(model: nn.Module, recurse: bool = True):
    """
    Returns the full unsharded parameters of ``model``. Any FSDP-managed
    parameters offloaded to CPU are moved to GPU in the returned list.

    Args:
        recurse (bool): If ``False``, only unshards the parameters immediate to
            ``model``; if ``True``, recurses through the module hierarchy
            rooted at ``model``.
    """
    with FSDP.summon_full_params(model, recurse=recurse):
        # Deep-copy while the full parameters are materialized so the
        # returned tensors outlive the unshard context.
        params = [p for p in model.parameters()]
        return deepcopy(params)
203
+
204
+
205
+ def _maybe_cuda(model: nn.Module, move_to_cuda: bool):
206
+ return model.cuda() if move_to_cuda else model
207
+
208
+
209
+ def _maybe_wrap_fsdp(model: nn.Module, wrap_fsdp: bool, *args, **kwargs):
210
+ return model if not wrap_fsdp else FSDP(model, *args, **kwargs)
211
+
212
+
213
class DummyProcessGroup:
    """Minimal stand-in for a process group: fixed rank/size, no-op allreduce."""

    def __init__(self, rank: int, size: int):
        self._rank = rank
        self._size = size

    def rank(self) -> int:
        return self._rank

    def size(self) -> int:
        return self._size

    def allreduce(self, *args, **kwargs):
        # Return a mock "work" handle whose future resolves immediately,
        # so callers that wait on the collective do not block.
        def _completed_future():
            fut: torch.futures.Future = torch.futures.Future()
            fut.set_result(1)
            return fut

        handle = mock.Mock()
        handle.get_future = _completed_future
        return handle
234
+
235
+
236
class TransformerWithSharedParams(FSDPTestModel):
    """Small transformer whose embedding and output projection share weights.

    Used by FSDP unit tests; exposes the ``FSDPTestModel`` interface
    (``get_input`` / ``get_loss`` / ``run_backward`` / ``init``).
    """

    def __init__(
        self,
        group: dist.ProcessGroup,
        cuda_init_mode: CUDAInitMode,
        add_bn: bool,
        deterministic: bool,
    ):
        super().__init__()
        self.rank = group.rank()
        self.world_size = group.size()
        if deterministic:
            # Seed so all ranks construct identical weights.
            torch.manual_seed(0)
        d_vocab = 23
        d_model = 16

        self.embed_tokens = nn.Embedding(d_vocab, d_model)
        self.transformer = nn.Transformer(
            d_model=d_model,
            num_encoder_layers=2,
            num_decoder_layers=2,
            dim_feedforward=8,
            dropout=0.1,
        )
        self.output_proj = nn.Linear(d_model, d_vocab)

        # share the embedding and output projection weights
        self.output_proj.weight = self.embed_tokens.weight
        self.register_buffer(
            "vocab_bias", self.embed_tokens.weight.new_ones((d_model,))
        )
        self.register_buffer(
            "long_buffer",
            torch.zeros_like(self.vocab_bias, dtype=torch.long),
        )  # type: ignore[arg-type]

        self.bs = 2
        self.bn = torch.nn.BatchNorm1d(self.bs) if add_bn else torch.nn.Identity()
        if cuda_init_mode == CUDAInitMode.CUDA_BEFORE:
            # nn.Module.cuda() moves in place and returns self; rebinding is
            # harmless here.
            self = self.cuda()
        if deterministic:
            # eval() disables dropout/BN updates for reproducible outputs.
            self.eval()

    def get_input(self, device):
        torch.manual_seed(1 + self.rank)  # keep everything deterministic
        src = torch.arange(12, device=device).view(6, self.bs)  # T x B
        tgt = torch.arange(self.bs * 4, device=device).view(4, self.bs)  # T x B
        return (src, tgt)

    def forward(self, src_ids, tgt_ids):
        src = self.embed_tokens(src_ids)
        src = src + self.vocab_bias + self.long_buffer.type_as(src)  # type: ignore[operator]
        tgt = self.embed_tokens(tgt_ids)
        tgt = self.bn(tgt)
        x = self.transformer(src, tgt)
        return self.output_proj(x)

    def get_loss(self, input, output):
        _, tgt = input
        return nn.functional.cross_entropy(
            output.view(-1, output.size(-1)), tgt.view(-1), reduction="sum"
        )

    def run_backward(self, loss):
        loss.backward()

    @staticmethod
    def init(
        group: dist.ProcessGroup,
        fsdp_init_mode: FSDPInitMode,
        cuda_init_mode: CUDAInitMode,
        fsdp_kwargs: Optional[Dict[str, Any]] = None,
        deterministic: bool = False,
        add_bn: bool = True,
    ) -> Union[nn.Module, FSDP]:
        """
        Initializes a :class:`TransformerWithSharedParams` instance.

        Args:
            fsdp_init_mode (FSDPInitMode): If ``NO_FSDP``, then does not wrap
                any modules with FSDP. If ``RECURSIVE``, then wraps with
                top-level FSDP. By default, the top-level FSDP uses the
                ``ModuleWrapPolicy`` for encoder and decoder layers, but a
                different auto wrap policy may be specified via
                ``fsdp_kwargs``.
            cuda_init_mode (CUDAInitMode): Determines model movement to CUDA.
            fsdp_kwargs (Optional[Dict[str, Any]]): Optional keyword arguments
                forwarded to the FSDP constructor.
            deterministic (bool): Whether to make the model deterministic
                across constructions.
            add_bn (bool): Whether to include batch norm in the model.
        """

        if fsdp_kwargs is None:
            fsdp_kwargs = {}
        if fsdp_init_mode == FSDPInitMode.NO_FSDP:
            # `group` may be a tuple of process groups for hybrid sharding;
            # the module itself only needs the first one.
            if isinstance(group, tuple):
                pg = group[0]
            else:
                pg = group
            return TransformerWithSharedParams(
                pg, cuda_init_mode, add_bn, deterministic
            )
        elif fsdp_init_mode == FSDPInitMode.RECURSIVE:
            # Default to the `ModuleWrapPolicy`
            if "auto_wrap_policy" not in fsdp_kwargs:
                auto_wrap_policy = ModuleWrapPolicy(
                    {
                        TransformerEncoderLayer,
                        TransformerDecoderLayer,
                    }
                )
            else:
                auto_wrap_policy = fsdp_kwargs.pop("auto_wrap_policy")

            # Hybrid-shard strategies derive their own process groups unless
            # a tuple of groups was passed in explicitly.
            if (
                "sharding_strategy" in fsdp_kwargs
                and fsdp_kwargs["sharding_strategy"]
                in {ShardingStrategy.HYBRID_SHARD, ShardingStrategy._HYBRID_SHARD_ZERO2}
                and not isinstance(group, tuple)
            ):
                fsdp_pg = None
            else:
                fsdp_pg = group

            if isinstance(group, tuple):
                tformer_pg = group[0]
            else:
                tformer_pg = group

            m = TransformerWithSharedParams(
                tformer_pg, cuda_init_mode, add_bn, deterministic
            )
            fsdp_model = FSDP(
                m,
                fsdp_pg,
                auto_wrap_policy=auto_wrap_policy,
                **fsdp_kwargs,
            )
            if cuda_init_mode == CUDAInitMode.CUDA_AFTER:
                fsdp_model = fsdp_model.cuda()
            return fsdp_model
        raise ValueError(f"Unsupported FSDP init mode: {fsdp_init_mode}")

    def get_ignored_modules(self):
        # Modules that FSDP ignore-list tests exclude from wrapping.
        return [self.transformer]
382
+
383
+
384
class NestedWrappedModule(FSDPTestModel):
    """Sequential MLP with optionally FSDP-wrapped nested submodules.

    Exercises nested (non-root) FSDP wrapping; exposes the
    ``FSDPTestModel`` interface.
    """

    def __init__(
        self,
        group: dist.ProcessGroup,
        wrap_fsdp: bool,
        cuda_init_mode: CUDAInitMode,
        deterministic: bool,
        **fsdp_kwargs,
    ):
        super().__init__()
        self.rank = group.rank()
        self.world_size = group.size()
        move_to_cuda = cuda_init_mode == CUDAInitMode.CUDA_BEFORE

        def _maybe_wrap(layer):
            # Wrap only when building the FSDP variant of the model.
            if wrap_fsdp:
                return FSDP(layer, group, **fsdp_kwargs)
            return layer

        if deterministic:
            torch.manual_seed(0)
        self.module = nn.Sequential(
            _maybe_cuda(nn.Linear(8, 4), move_to_cuda),
            _maybe_wrap(
                nn.Sequential(
                    _maybe_wrap(_maybe_cuda(nn.Linear(4, 16), move_to_cuda)),
                    _maybe_cuda(nn.Linear(16, 16), move_to_cuda),
                ),
            ),
            _maybe_wrap(_maybe_cuda(nn.Linear(16, 4), move_to_cuda)),
            _maybe_cuda(nn.Linear(4, 8), move_to_cuda),
        )

    def get_input(self, device):
        torch.manual_seed(1 + self.rank)  # keep everything deterministic
        return (torch.rand(4, 8, device=device),)

    def forward(self, x):
        return self.module(x)

    def get_loss(self, input, output):
        loss = output.sum()
        return loss

    def run_backward(self, loss):
        loss.backward()

    @staticmethod
    def init(
        group: dist.ProcessGroup,
        fsdp_init_mode: FSDPInitMode,
        cuda_init_mode: CUDAInitMode,
        fsdp_kwargs: Optional[Dict[str, Any]] = None,
        deterministic: bool = False,
    ) -> nn.Module:
        """
        Initializes a :class:`NestedWrappedModule` instance.

        Args:
            fsdp_init_mode (FSDPInitMode): If ``NO_FSDP``, then does not wrap
                any modules with FSDP. If ``RECURSIVE``, then wraps some nested
                modules with FSDP but not the top-level module. The model may
                later be wrapped with a top-level FSDP external to this method
                if desired.
            cuda_init_mode (CUDAInitMode): Determines model movement to CUDA.
            fsdp_kwargs (Optional[Dict[str, Any]]): Optional keyword arguments
                forwarded to the FSDP constructor.
            deterministic (bool): Whether to make the model deterministic
                across constructions.
        """
        if fsdp_kwargs is None:
            fsdp_kwargs = {}
        if fsdp_init_mode == FSDPInitMode.NO_FSDP:
            return NestedWrappedModule(
                group,
                wrap_fsdp=False,
                cuda_init_mode=cuda_init_mode,
                deterministic=deterministic,
            )
        elif fsdp_init_mode == FSDPInitMode.RECURSIVE:
            # Does not wrap with top-level FSDP
            fsdp_model = NestedWrappedModule(
                group,
                wrap_fsdp=True,
                cuda_init_mode=cuda_init_mode,
                deterministic=deterministic,
                **fsdp_kwargs,
            )
            if cuda_init_mode == CUDAInitMode.CUDA_AFTER:
                fsdp_model = fsdp_model.cuda()
            return fsdp_model
        raise ValueError(f"Unsupported FSDP init mode: {fsdp_init_mode}")
476
+
477
+
478
class AlwaysWrapNestedWrappedModule(NestedWrappedModule):
    """``NestedWrappedModule`` variant wrapped top-level with ``always_wrap_policy``."""

    @staticmethod
    def init(
        group: dist.ProcessGroup,
        fsdp_init_mode: FSDPInitMode,
        cuda_init_mode: CUDAInitMode,
        fsdp_kwargs: Optional[Dict[str, Any]] = None,
        deterministic: bool = False,
    ):
        """
        Initializes a :class:`NestedWrappedModule` instance, but unlike
        :meth:`NestedWrappedModule.init`, for the ``RECURSIVE`` init mode, this
        wraps with top-level FSDP and the ``always_wrap_policy()`` auto wrap
        policy.
        """
        # Build the plain (unwrapped) model first, then wrap the whole thing.
        model = super(
            AlwaysWrapNestedWrappedModule, AlwaysWrapNestedWrappedModule
        ).init(
            group=group,
            fsdp_init_mode=FSDPInitMode.NO_FSDP,
            cuda_init_mode=cuda_init_mode,
            fsdp_kwargs=fsdp_kwargs,
            deterministic=deterministic,
        )
        if fsdp_init_mode == FSDPInitMode.NO_FSDP:
            return model
        elif fsdp_init_mode == FSDPInitMode.RECURSIVE:
            fsdp_kwargs = fsdp_kwargs or {}
            fsdp_model = FSDP(model, auto_wrap_policy=always_wrap_policy, **fsdp_kwargs)
            if cuda_init_mode == CUDAInitMode.CUDA_AFTER:
                fsdp_model = fsdp_model.cuda()
            return fsdp_model
        # NOTE(review): unlike the base class, an unknown init mode falls
        # through and returns None rather than raising — confirm intended.
510
+
511
+
512
class NonUniformReqGradNWM(NestedWrappedModule):
    """``NestedWrappedModule`` variant with non-uniform ``requires_grad``.

    Groups the last two linear layers into one FSDP-wrapped ``nn.Sequential``
    and freezes everything else, so some ranks end up with no trainable
    (non-zero sized) parameter shards.
    """

    def __init__(
        self,
        group: dist.ProcessGroup,
        wrap_fsdp: bool,
        cuda_init_mode: CUDAInitMode,
        deterministic: bool,
        **fsdp_kwargs,
    ):
        super(NestedWrappedModule, self).__init__()
        # This `__init__` only differs from `NestedWrappedModule.__init__` in that
        # the last two `nn.Linear` layers are FSDP wrapped in a `nn.Sequential`
        # container. This arrangement results in all elements of the last two parameters
        # residing on a single rank. Freezing all parameters except those two allows us
        # to verify that `ShardedGradScaler` accommodates situations where some ranks
        # have no (non-zero sized) parameter shards.
        self.rank = group.rank()
        self.world_size = group.size()
        move_to_cuda = cuda_init_mode == CUDAInitMode.CUDA_BEFORE

        def _maybe_wrap(layer):
            if wrap_fsdp:
                return FSDP(layer, group, **fsdp_kwargs)
            return layer

        if deterministic:
            torch.manual_seed(0)
        self.module = nn.Sequential(
            _maybe_cuda(nn.Linear(8, 4), move_to_cuda),
            _maybe_wrap(
                nn.Sequential(
                    _maybe_wrap(_maybe_cuda(nn.Linear(4, 16), move_to_cuda)),
                    _maybe_cuda(nn.Linear(16, 16), move_to_cuda),
                ),
            ),
            _maybe_wrap(
                nn.Sequential(
                    _maybe_cuda(nn.Linear(16, 4), move_to_cuda),
                    _maybe_cuda(nn.Linear(4, 8), move_to_cuda),
                ),
            ),
        )

    @staticmethod
    def _set_nonuniform_req_grad(model, req_grad_mask) -> None:
        # Freeze every parameter whose name does not match the mask.
        for n, p in model.named_parameters():
            if not re.match(req_grad_mask, n):
                p.requires_grad_(False)

    @staticmethod
    def init(
        group: dist.ProcessGroup,
        fsdp_init_mode: FSDPInitMode,
        cuda_init_mode: CUDAInitMode,
        fsdp_kwargs: Optional[Dict[str, Any]] = None,
        deterministic: bool = False,
    ):
        """
        Initializes a :class:`NestedWrappedModule` instance, but unlike
        :meth:`NestedWrappedModule.init`, it wraps a second :class:`torch.nn.Sequential`
        container to enable the desired non-uniform ``requires_grad``
        ``use_orig_params=True`` tests. For both ``RECURSIVE`` and ``NO_FSDP``
        init modes, freezes all parameters except the last two to validate
        ``ShardedGradScaler`` support for ranks with no (non-zero sized) local shards in
        FSDP ``use_orig_params=True`` mode.
        """
        # The parameters that should remain unfrozen are in `module.2.1`. The regex
        # pattern below matches the relevant parameter names both with and without
        # an interstitial FSDP module indicator (`_fsdp_wrapped_module`) present.
        req_grad_pattern = re.compile(r"module\.2.*\.1.*")
        if fsdp_init_mode == FSDPInitMode.NO_FSDP:
            ddp_model = NonUniformReqGradNWM(
                group,
                wrap_fsdp=False,
                cuda_init_mode=cuda_init_mode,
                deterministic=deterministic,
            )
            NonUniformReqGradNWM._set_nonuniform_req_grad(ddp_model, req_grad_pattern)
            return ddp_model
        elif fsdp_init_mode == FSDPInitMode.RECURSIVE:
            if fsdp_kwargs is None:
                fsdp_kwargs = {}
            fsdp_model = NonUniformReqGradNWM(
                group,
                wrap_fsdp=True,
                cuda_init_mode=cuda_init_mode,
                deterministic=deterministic,
                **fsdp_kwargs,
            )
            if cuda_init_mode == CUDAInitMode.CUDA_AFTER:
                fsdp_model = fsdp_model.cuda()
            NonUniformReqGradNWM._set_nonuniform_req_grad(fsdp_model, req_grad_pattern)
            return fsdp_model
        raise ValueError(f"Unsupported FSDP init mode: {fsdp_init_mode}")
606
+
607
+
608
class ModuleWithDelay(FSDPTestModel):
    """Wraps a :class:`FSDPTestModel` and optionally injects a CUDA sleep
    after the loss computation and/or right before each gradient
    reduce-scatter, for testing communication/computation overlap."""

    def __init__(
        self,
        module: nn.Module,
        delay_after_loss_ms: int,
        delay_before_reduction_ms: int,
    ):
        super().__init__()
        self.delay_after_loss_ms = delay_after_loss_ms
        self.delay_before_reduction_ms = delay_before_reduction_ms
        self.module = module

    def get_input(self, device):
        # Delegate to the wrapped model's input factory.
        return self.module.get_input(device)

    def forward(self, x):
        return self.module(x)

    def get_loss(self, input, output):
        loss = self.module.get_loss(input, output)
        delay_ms = self.delay_after_loss_ms
        if delay_ms > 0:
            # Spin the GPU for the configured duration after the loss.
            torch.cuda._sleep(int(delay_ms * get_cycles_per_ms()))
        return loss

    def run_backward(self, loss):
        unpatched_reduce_scatter = torch.distributed.reduce_scatter_tensor

        def _reduce_scatter_with_delay(*args, **kwargs):
            delay_ms = self.delay_before_reduction_ms
            if delay_ms > 0:
                # Spin the GPU before every reduce-scatter issued in backward.
                torch.cuda._sleep(int(delay_ms * get_cycles_per_ms()))
            return unpatched_reduce_scatter(*args, **kwargs)

        with mock.patch(
            "torch.distributed.reduce_scatter_tensor", _reduce_scatter_with_delay
        ):
            self.module.run_backward(loss)

    @staticmethod
    def init(
        module_class: Type[FSDPTestModel],
        *model_args: Any,
        delay_after_loss_ms: int,
        delay_before_reduction_ms: int,
        **model_kwargs: Any,
    ):
        """
        Constructs ``module_class`` via its own ``init()`` and wraps the
        result in a :class:`ModuleWithDelay`.

        Args:
            module_class (Type[FSDPTestModel]): Wrapped module class to which
                to add delays.
            model_args: Positional arguments forwarded to the ``module_class``
                ``init()``.
            delay_after_loss_ms (int): Delay after computing the loss/before
                the optimizer step (in ms).
            delay_before_reduction_ms (int): Delay before reduce-scattering
                gradients (in ms).
            model_kwargs: Keyword arguments forwarded to the ``module_class``
                ``init()``.
        """
        inner = module_class.init(*model_args, **model_kwargs)
        return ModuleWithDelay(inner, delay_after_loss_ms, delay_before_reduction_ms)
676
+
677
+
678
class NestedWrappedModuleWithDelay(ModuleWithDelay):
    """Convenience wrapper: a :class:`NestedWrappedModule` with optional
    post-loss and pre-reduction delays (see :class:`ModuleWithDelay`)."""

    @staticmethod
    def init(  # type: ignore[override]
        group: dist.ProcessGroup,
        fsdp_init_mode: FSDPInitMode,
        cuda_init_mode: CUDAInitMode = CUDAInitMode.CUDA_AFTER,
        fsdp_kwargs: Optional[Dict[str, Any]] = None,
        deterministic: bool = False,
        delay_after_loss_ms: int = 0,
        delay_before_reduction_ms: int = 0,
    ):
        # Gather the wrapped model's construction arguments, then delegate
        # to the generic delayed-module factory.
        inner_init_kwargs = dict(
            group=group,
            fsdp_init_mode=fsdp_init_mode,
            cuda_init_mode=cuda_init_mode,
            fsdp_kwargs=fsdp_kwargs,
            deterministic=deterministic,
        )
        return ModuleWithDelay.init(
            NestedWrappedModule,
            delay_after_loss_ms=delay_after_loss_ms,
            delay_before_reduction_ms=delay_before_reduction_ms,
            **inner_init_kwargs,
        )
699
+
700
+
701
class DummyDDP(nn.Module):
    """Pass-through wrapper exposing a ``.module`` attribute like DDP does,
    but performing no communication of any kind."""

    def __init__(self, module):
        super().__init__()
        self.module = module

    def forward(self, *args, **kwargs):
        return self.module(*args, **kwargs)
708
+
709
+
710
class MixtureOfExperts(NestedWrappedModule):
    """A small mixture-of-experts model for FSDP tests: an input linear, a
    shared linear (sharded across ``group``), an "expert" linear (kept in a
    world-size-1 process group, i.e. per-rank parameters), and an output
    linear. Optionally delays resharding of the expert parameters in the
    forward pass."""

    def __init__(
        self,
        group: dist.ProcessGroup,
        wrap_fsdp: bool,
        cuda_init_mode: CUDAInitMode,
        delay_before_free_ms: int,
        deterministic: bool,
        **fsdp_kwargs,
    ):
        super().__init__(
            group=group,
            wrap_fsdp=wrap_fsdp,
            cuda_init_mode=cuda_init_mode,
            deterministic=deterministic,
        )
        self.group = group
        self.delay_before_free_ms = delay_before_free_ms
        self.wrap_fsdp = wrap_fsdp
        self.move_to_cuda = cuda_init_mode == CUDAInitMode.CUDA_BEFORE
        if deterministic:
            # Give each rank different expert parameters
            torch.manual_seed(42 + self.rank)
        d_expert = 23
        d_shared = 12
        d_input = 8
        expert = _maybe_cuda(nn.Linear(d_expert, d_shared), self.move_to_cuda)

        self.num_expert_params = sum(p.numel() for p in expert.parameters())
        # Tag expert parameters so `run_backward` can skip their reduction.
        for p in expert.parameters():
            p.expert = True  # type: ignore[attr-defined]

        if deterministic:
            # Keep all other parameters the same across ranks
            torch.manual_seed(0)

        shared = _maybe_cuda(nn.Linear(d_shared, d_expert), self.move_to_cuda)

        if wrap_fsdp:
            # we create a process group of size 1 for the expert params
            expert_group = torch.distributed.new_group(
                [group.rank()]
            )  # world size 1 means no shard
            expert = FSDP(expert, expert_group, **fsdp_kwargs)  # type: ignore[assignment]
            shared = FSDP(shared, group, **fsdp_kwargs)  # type: ignore[assignment]

        self.module = nn.Sequential(
            _maybe_cuda(nn.Linear(d_input, d_shared), self.move_to_cuda),
            shared,
            expert,
            _maybe_cuda(nn.Linear(d_shared, d_input), self.move_to_cuda),
        )

    def forward(self, x):
        """Forward pass; if configured, sleeps on the GPU before each
        expert reshard to exercise overlap/reshard-timing code paths."""
        if self.delay_before_free_ms > 0:
            expert = self.module[2]
            if isinstance(expert, FSDP):
                orig_reshard = torch.distributed.fsdp._runtime_utils._reshard

                def _delayed_reshard(*args, **kwargs):
                    torch.cuda._sleep(
                        int(self.delay_before_free_ms * get_cycles_per_ms())
                    )
                    return orig_reshard(*args, **kwargs)

                # This patch covers any `import torch..._reshard` uses.
                with mock.patch(
                    "torch.distributed.fsdp._runtime_utils._reshard", _delayed_reshard
                ):
                    return self.module(x)

        return self.module(x)

    def run_backward(self, loss):
        """Backward pass; when not FSDP-wrapped, emulates data parallelism by
        all-reducing all non-expert gradients (experts stay per-rank)."""
        loss.backward()
        # Manually reduce gradients if not wrapped in FullyShardedDataParallel
        if not self.wrap_fsdp:
            with torch.no_grad():
                for p in self.parameters():
                    if hasattr(p, "expert"):
                        continue  # these params don't need grad reduction
                    if p.grad is not None:
                        p.grad.div_(self.world_size)
                        torch.distributed.all_reduce(p.grad, group=self.group)

    @staticmethod
    def init(
        group: dist.ProcessGroup,
        fsdp_init_mode: FSDPInitMode,
        cuda_init_mode: CUDAInitMode,
        fsdp_kwargs: Optional[Dict[str, Any]] = None,
        deterministic: bool = False,
        delay_before_free_ms: int = 0,
    ):
        """
        Initializes a :class:`MixtureOfExperts` instance.

        Args:
            fsdp_init_mode (FSDPInitMode): If ``NO_FSDP``, then does not wrap
                any modules with FSDP. If ``RECURSIVE``, then wraps some nested
                modules with FSDP, including the expert and shared layers, but
                not the top-level module. The model may later be wrapped with a
                top-level FSDP external to this method if desired.
            cuda_init_mode (CUDAInitMode): Determines model movement to CUDA.
            fsdp_kwargs (Optional[Dict[str, Any]]): Optional keyword arguments
                forwarded to the FSDP constructor.
            deterministic (bool): Whether to make the model deterministic
                across constructions.
            delay_before_free_ms (int): Delay before resharding expert
                parameters in the forward pass (in ms).
        """
        if fsdp_kwargs is None:
            fsdp_kwargs = {}
        if fsdp_init_mode == FSDPInitMode.NO_FSDP:
            return MixtureOfExperts(
                group,
                wrap_fsdp=False,
                cuda_init_mode=cuda_init_mode,
                delay_before_free_ms=delay_before_free_ms,
                deterministic=deterministic,
            )
        elif fsdp_init_mode == FSDPInitMode.RECURSIVE:
            # Does not wrap with top-level FSDP
            fsdp_model = MixtureOfExperts(
                group,
                wrap_fsdp=True,
                cuda_init_mode=cuda_init_mode,
                delay_before_free_ms=delay_before_free_ms,
                deterministic=deterministic,
                **fsdp_kwargs,
            )
            if cuda_init_mode == CUDAInitMode.CUDA_AFTER:
                fsdp_model = fsdp_model.cuda()
            return fsdp_model
        raise ValueError(f"Unsupported FSDP init mode: {fsdp_init_mode}")
845
+
846
+
847
class MLP(nn.Module):
    """Two-layer feed-forward block: ``linear -> relu -> linear -> relu``,
    optionally followed by adding a registered 1-D buffer to the output.

    Args:
        dim: Input/output feature dimension.
        device: Device for the parameters (and buffer, if any).
        bias: Whether the linear layers have bias terms.
        with_buffer: If ``True``, registers a random ``(dim,)`` buffer that is
            added to the activation.
        dim_multiplier: Hidden dimension is ``dim_multiplier * dim``.
    """

    def __init__(
        self,
        dim: int,
        device: Optional[torch.device] = None,
        *,
        bias: bool = True,
        with_buffer: bool = False,
        dim_multiplier: int = 4,
    ):
        super().__init__()
        hidden_dim = dim_multiplier * dim
        self.in_proj = nn.Linear(dim, hidden_dim, device=device, bias=bias)
        self.out_proj = nn.Linear(hidden_dim, dim, device=device, bias=bias)
        if with_buffer:
            self.register_buffer("buffer", torch.randn((dim,), device=device))
        else:
            self.buffer = None

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        out = F.relu(self.in_proj(x))
        out = F.relu(self.out_proj(out))
        return out if self.buffer is None else out + self.buffer

    def reset_parameters(self):
        # Only the buffer is manually re-initialized here (if present).
        if self.buffer is not None:
            torch.nn.init.normal_(self.buffer)
877
+
878
+
879
class MLPStack(nn.Sequential):
    """A stack of three :class:`MLP` blocks (optionally followed by a
    ``LayerNorm`` for sequence parallelism) with a helper to apply tensor
    parallelism + FSDP sharding."""

    def __init__(self, mlp_dim: int, *, with_seq_parallel: bool = False):
        modules: List[nn.Module] = [
            # Use multiplier of 3 to exercise uneven case
            MLP(mlp_dim, dim_multiplier=3),
            MLP(mlp_dim),
            MLP(mlp_dim, dim_multiplier=3),
        ]
        if with_seq_parallel:
            modules.append(nn.LayerNorm(mlp_dim, bias=False))
        super().__init__(*modules)
        self.with_seq_parallel = with_seq_parallel

    def parallelize(
        self,
        tp_mesh: DeviceMesh,
        dp_mesh: DeviceMesh,
        use_activation_checkpointing: bool,
        **fsdp_kwargs,
    ) -> "MLPStack":
        """Applies colwise/rowwise tensor parallelism over ``tp_mesh``, then
        (optionally) activation checkpointing and FSDP over ``dp_mesh`` to
        each MLP and to the stack itself. Returns ``self``."""
        parallelize_plan = {
            # Pass `use_local_output=False` to keep as DTensor to preserve
            # uneven activation dims
            "0.in_proj": ColwiseParallel(use_local_output=False),
            "0.out_proj": RowwiseParallel(use_local_output=False),
            "1.in_proj": ColwiseParallel(use_local_output=False),
            "1.out_proj": RowwiseParallel(use_local_output=False),
            "2.in_proj": ColwiseParallel(use_local_output=False),
            # Last row-parallel output is sharded on the sequence dim when
            # sequence parallelism is enabled
            "2.out_proj": RowwiseParallel(output_layouts=Shard(1))
            if self.with_seq_parallel
            else RowwiseParallel(),
        }
        if self.with_seq_parallel:
            parallelize_plan["3"] = SequenceParallel(sequence_dim=1)
        parallelize_module(self, device_mesh=tp_mesh, parallelize_plan=parallelize_plan)
        for module in self:
            # The LayerNorm (if any) is left for the top-level fully_shard call
            if isinstance(module, nn.LayerNorm):
                continue
            if use_activation_checkpointing:
                checkpoint(module)
            fully_shard(module, mesh=dp_mesh, **fsdp_kwargs)
        fully_shard(self, mesh=dp_mesh, **fsdp_kwargs)
        return self
922
+
923
+
924
class DoubleLinear(nn.Module):
    """Module with two linear layers: either returns both ReLU'd outputs
    (``use_second_linear=True``) or leaves ``lin2`` unused (``False``)."""

    def __init__(self, dim: int, use_second_linear: bool = True):
        super().__init__()
        self.lin1 = nn.Linear(dim, dim)
        self.lin2 = nn.Linear(dim, dim)
        self.relu = nn.ReLU()
        self.use_second_linear = use_second_linear

    def forward(
        self, x: torch.Tensor
    ) -> Union[Tuple[torch.Tensor, torch.Tensor], torch.Tensor]:
        first = self.relu(self.lin1(x))
        if not self.use_second_linear:
            return first
        return first, self.relu(self.lin2(x))
943
+
944
+
945
+ # NOTE: For these patch methods, if we want safety under multi-threading (e.g.
946
+ # when using multi-threaded process group), then we want:
947
+ # (1) a barrier immediately after reading the original value to ensure that all
948
+ # threads see the same original value
949
+ # (2) a barrier immediately before restoring the original value to ensure that
950
+ # all threads use the patched value inside the context
951
@contextlib.contextmanager
def patch_all_gather(new_all_gather_into_tensor: Callable):
    """Temporarily replaces ``dist.all_gather_into_tensor`` with the given
    callable for the duration of the context."""
    saved = dist.all_gather_into_tensor
    # Barrier so every thread reads the same original value.
    dist.barrier()
    dist.all_gather_into_tensor = new_all_gather_into_tensor
    try:
        yield
    finally:
        # Barrier so every thread is done using the patch before restoring.
        dist.barrier()
        dist.all_gather_into_tensor = saved
961
+
962
+
963
@contextlib.contextmanager
def patch_reduce_scatter(new_reduce_scatter_tensor: Callable):
    """Temporarily replaces ``dist.reduce_scatter_tensor`` with the given
    callable for the duration of the context."""
    saved = dist.reduce_scatter_tensor
    # Barrier so every thread reads the same original value.
    dist.barrier()
    dist.reduce_scatter_tensor = new_reduce_scatter_tensor
    try:
        yield
    finally:
        # Barrier so every thread is done using the patch before restoring.
        dist.barrier()
        dist.reduce_scatter_tensor = saved
973
+
974
+
975
@contextlib.contextmanager
def patch_all_reduce(new_all_reduce: Callable):
    """Temporarily replaces ``dist.all_reduce`` with the given callable for
    the duration of the context."""
    saved = dist.all_reduce
    # Barrier so every thread reads the same original value.
    dist.barrier()
    dist.all_reduce = new_all_reduce
    try:
        yield
    finally:
        # Barrier so every thread is done using the patch before restoring.
        dist.barrier()
        dist.all_reduce = saved
985
+
986
+
987
@no_type_check
@contextlib.contextmanager
def patch_unshard(new_unshard: Callable):
    """Temporarily replaces ``FSDPParamGroup.unshard`` with ``new_unshard``."""
    saved = FSDPParamGroup.unshard
    # Barrier so every thread reads the same original value.
    dist.barrier()
    FSDPParamGroup.unshard = new_unshard
    try:
        yield
    finally:
        # Barrier so every thread is done using the patch before restoring.
        dist.barrier()
        FSDPParamGroup.unshard = saved
998
+
999
+
1000
@no_type_check
@contextlib.contextmanager
def patch_reshard(new_reshard: Callable):
    """Temporarily replaces ``FSDPParamGroup.reshard`` with ``new_reshard``."""
    saved = FSDPParamGroup.reshard
    # Barrier so every thread reads the same original value.
    dist.barrier()
    FSDPParamGroup.reshard = new_reshard
    try:
        yield
    finally:
        # Barrier so every thread is done using the patch before restoring.
        dist.barrier()
        FSDPParamGroup.reshard = saved
1011
+
1012
+
1013
@no_type_check
@contextlib.contextmanager
def patch_post_backward(new_post_backward: Callable):
    """Temporarily replaces ``FSDPParamGroup.post_backward`` with the given
    callable for the duration of the context."""
    saved = FSDPParamGroup.post_backward
    # Barrier so every thread reads the same original value.
    dist.barrier()
    FSDPParamGroup.post_backward = new_post_backward
    try:
        yield
    finally:
        # Barrier so every thread is done using the patch before restoring.
        dist.barrier()
        FSDPParamGroup.post_backward = saved
1024
+
1025
+
1026
@no_type_check
@contextlib.contextmanager
def patch_register_post_backward_hook_backward(new_backward: Callable):
    """Temporarily replaces ``RegisterPostBackwardFunction.backward`` with
    the given callable for the duration of the context."""
    saved = RegisterPostBackwardFunction.backward
    # Barrier so every thread reads the same original value.
    dist.barrier()
    RegisterPostBackwardFunction.backward = new_backward
    try:
        yield
    finally:
        # Barrier so every thread is done using the patch before restoring.
        dist.barrier()
        RegisterPostBackwardFunction.backward = saved
1037
+
1038
+
1039
def reduce_scatter_with_assert(
    cls,
    orig_reduce_scatter: Callable,
    assert_fn: Callable,  # `assert_fn(output: Tensor)`
    *args: Any,
    **kwargs: Any,
):
    """Runs ``assert_fn`` on the reduce-scatter output argument and then
    delegates to ``orig_reduce_scatter`` with the same arguments.

    The output is taken from the first positional argument if present, else
    from the ``output`` keyword argument; otherwise raises ``AssertionError``.
    """
    if args:
        output = args[0]
    elif "output" in kwargs:
        output = kwargs["output"]
    else:
        raise AssertionError(
            f"Cannot get reduce-scatter output from\nargs: {args}\nkwargs: {kwargs}"
        )
    assert_fn(output)
    return orig_reduce_scatter(*args, **kwargs)
1056
+
1057
+
1058
def check_sharded_parity(
    cls,  # unit test class
    replicated_module: nn.Module,
    sharded_module: nn.Module,
    prefixes_to_ignore: Tuple[str, ...] = (),
):
    """Checks that each sharded parameter (and its gradient, if present) in
    ``sharded_module`` equals the reference sharding of the corresponding
    replicated parameter obtained via ``distribute_tensor``.

    ``prefixes_to_ignore`` are substrings stripped from the sharded parameter
    names (e.g. wrapper-module prefixes) before comparing names.
    """
    # Assumes both modules yield corresponding parameters in the same order
    # — TODO confirm for models with differing wrapping structure.
    for (replicated_name, replicated_param), (sharded_name, sharded_param) in zip(
        replicated_module.named_parameters(), sharded_module.named_parameters()
    ):
        clean_sharded_name = sharded_name
        for prefix in prefixes_to_ignore:
            clean_sharded_name = clean_sharded_name.replace(prefix, "")
        cls.assertEqual(replicated_name, clean_sharded_name)
        cls.assertIsInstance(sharded_param, DTensor)
        assert isinstance(sharded_param, DTensor)  # mypy
        mesh, placements = sharded_param.device_mesh, sharded_param.placements
        if tuple(placements) == (Shard(0), Shard(0)):
            raise AssertionError(
                "FSDP's (Shard(0), Shard(0)) layout differs from distribute_tensor(), "
                "so we cannot check for equality using it"
            )
        # Build the reference shard from the replicated parameter using the
        # same mesh/placements as the FSDP-produced DTensor
        sharded_ref_param = distribute_tensor(replicated_param, mesh, placements)
        cls.assertEqual(sharded_param.to_local(), sharded_ref_param.to_local())
        if replicated_param.grad is None:
            cls.assertIsNone(sharded_param.grad)
            continue
        cls.assertIsNotNone(sharded_param.grad)
        sharded_ref_grad = distribute_tensor(replicated_param.grad, mesh, placements)
        cls.assertIsInstance(sharded_param.grad, DTensor)
        assert isinstance(sharded_param.grad, DTensor)  # mypy
        cls.assertEqual(sharded_param.grad.to_local(), sharded_ref_grad.to_local())
1089
+
1090
+
1091
class FSDPTestMultiThread(MultiThreadedTestCase):
    """Multi-threaded test base for FSDP: one thread per rank, with dynamo
    state reset around each per-thread test run."""

    @property
    def world_size(self):
        if torch.cuda.is_available():
            return torch.cuda.device_count()
        return 4

    def setUp(self):
        super().setUp()
        self._spawn_threads()

    def run_subtests(self, *args, **kwargs):
        # Delegate to the module-level subtest runner.
        return run_subtests(self, *args, **kwargs)

    def perThreadSetUp(self):
        torch._dynamo.reset()

    def perThreadTearDown(self):
        torch._dynamo.reset()
1108
+
1109
+
1110
class FSDPTest(MultiProcessTestCase):
    """Multi-process base class for FSDP unit tests: spawns one process per
    rank, initializes a process group per test (NCCL when CUDA is available,
    else gloo), and provides training and DDP-parity helpers."""

    def setUp(self):
        super().setUp()
        # Set TORCH_NCCL_DESYNC_DEBUG=0 to disable the NCCL `workCleanupLoop()`,
        # which can cause unit test flakiness:
        # https://github.com/pytorch/pytorch/issues/90848
        os.environ["TORCH_NCCL_DESYNC_DEBUG"] = "0"
        self._spawn_processes()

    @property
    def world_size(self):
        # Cap at 8 ranks on GPU; fall back to 4 processes on CPU-only hosts
        return min(torch.cuda.device_count(), 8) if torch.cuda.is_available() else 4

    @property
    def process_group(self):
        # Tests run on the default (world) process group
        return dist.distributed_c10d._get_default_group()

    @property
    def init_method(self):
        # File-based rendezvous using the per-test temporary file
        return f"{FILE_SCHEMA}{self.file_name}"

    def _check_cpu_offload(self, fsdp_model, cpu_offload):
        self.assertEqual(cpu_offload, fsdp_model.cpu_offload)

    def _check_backward_prefetch(self, fsdp_model, backward_prefetch):
        self.assertEqual(backward_prefetch, fsdp_model.backward_prefetch)

    def _check_forward_prefetch(self, fsdp_model, forward_prefetch):
        self.assertEqual(forward_prefetch, fsdp_model.forward_prefetch)

    def run_subtests(self, *args, **kwargs):
        return run_subtests(self, *args, **kwargs)

    @classmethod
    def _run(cls, rank, test_name, file_name, pipe, **kwargs):
        """Per-process entry point: initializes the process group (optionally
        a fake one), pins the CUDA device, runs the named test between
        barriers, then destroys the group."""
        self = cls(test_name)
        self.rank = rank
        self.file_name = file_name
        fake_pg = kwargs.get("fake_pg", False)

        print(f"dist init r={self.rank}, world={self.world_size}")

        # Specify gloo backend to make 'init_process_group()' succeed,
        # Actual tests will be skipped if there is no enough GPUs.
        backend = "nccl" if torch.cuda.is_available() else "gloo"

        try:
            if fake_pg:
                store = torch.testing._internal.distributed.fake_pg.FakeStore()
                dist.init_process_group(
                    backend="fake",
                    world_size=self.world_size,
                    rank=rank,
                    store=store,
                )
            else:
                dist.init_process_group(
                    init_method=self.init_method,
                    backend=backend,
                    world_size=int(self.world_size),
                    rank=self.rank,
                )
        except RuntimeError as e:
            # A "recompile" message indicates the backend is unavailable in
            # this build; skip rather than fail
            if "recompile" in e.args[0]:
                sys.exit(TEST_SKIPS["backend_unavailable"].exit_code)

            raise

        device_ids = None
        if torch.cuda.is_available() and torch.cuda.device_count():
            device_id = self.rank % torch.cuda.device_count()
            torch.cuda.set_device(device_id)
            device_ids = [device_id]

        # Execute barrier prior to running test to ensure that every process
        # has finished initialization and that the following test
        # immediately exiting due to a skip doesn't cause flakiness.
        dist.barrier(device_ids=device_ids)

        torch._dynamo.reset()
        self.run_test(test_name, pipe)
        torch._dynamo.reset()

        dist.barrier(device_ids=device_ids)

        dist.destroy_process_group()

    def _train_for_several_steps(
        self,
        model: nn.Module,
        num_steps: int,
        autocast: bool,
        lr: float = 0.01,
        fsdp_cpu_offload: Optional[CPUOffload] = None,
        save_model: bool = False,
        mixed_precision: Optional[MixedPrecision] = None,
        enable_sharded_grad_scaler: bool = False,
        use_pure_fp16: bool = False,
        sharded_grad_scaler_kwargs: Optional[Dict[str, Any]] = None,
    ):
        """Runs ``num_steps`` of SGD-with-momentum training on ``model``,
        checking dtype and CPU-offload device invariants along the way, and
        returns the final detached loss. If ``save_model``, round-trips the
        state dict (zeroing in between) each step."""
        cpu_offload_params = fsdp_cpu_offload and fsdp_cpu_offload.offload_params

        model_device = next(model.parameters()).device
        if sharded_grad_scaler_kwargs is None:
            sharded_grad_scaler_kwargs = {}
        sharded_grad_scaler = ShardedGradScaler(
            enabled=enable_sharded_grad_scaler, **sharded_grad_scaler_kwargs
        )
        # use SGD with momentum instead of Adam, since Adam is scale invariant
        # and this makes it bad for tests
        optim = torch.optim.SGD(model.parameters(), lr=lr, momentum=0.9)
        for _ in range(num_steps):
            optim.zero_grad()
            with torch.amp.autocast("cuda", enabled=autocast):
                # Inputs always cuda regardless of cpu offloading, or model.device
                input = model.module.get_input(torch.device("cuda"))
                if use_pure_fp16 or (mixed_precision and not isinstance(model, FSDP)):
                    if isinstance(input, torch.Tensor):
                        input = input.half()
                    else:
                        input = tuple(x.half() for x in input)
                output = model(*input)
                # Post-forward, if CPU offloading model param should be on CPU.
                if (
                    cpu_offload_params
                    and isinstance(model, FSDP)
                    # If not resharding after forward, the parameters are still
                    # exposed as unsharded views into the GPU flat parameter
                    and model.sharding_strategy
                    not in NO_RESHARD_AFTER_FORWARD_STRATEGIES
                ):
                    for p in model.parameters():
                        # Params should always be on CPU
                        self.assertEqual(p.device, torch.device("cpu"))

            loss = model.module.get_loss(input, output).to(model_device)
            loss = sharded_grad_scaler.scale(loss)

            if not mixed_precision and not use_pure_fp16:
                assert (
                    loss.dtype == torch.float32
                ), "loss data type should be float32, as the original \
                    parameter data type is float32."
            else:
                if use_pure_fp16:
                    self.assertEqual(loss.dtype, torch.float16)
                # FSDP loss is fp16, DDP AMP loss is fp32
                elif isinstance(model, FSDP):
                    assert mixed_precision is not None  # mypy
                    self.assertEqual(loss.dtype, mixed_precision.param_dtype)
                else:
                    self.assertEqual(loss.dtype, torch.float32)
            model.module.run_backward(loss)
            # Post-backward, if CPU offloading model params should be on CPU.
            if cpu_offload_params and isinstance(model, FSDP):
                for p in model.parameters():
                    # Params should always be on CPU
                    self.assertEqual(p.device, torch.device("cpu"))
            # Unscale the gradients and step
            sharded_grad_scaler.step(optim)
            # Update the scale factor
            sharded_grad_scaler.update()
            # if save_model, simulate save + load.
            if save_model:
                state_dict = {k: v.clone() for k, v in model.state_dict().items()}
                # Zero params, if save/load state_dict did not work properly, this
                # would break the parity test with DDP.
                _zero_model(model)
                model.load_state_dict(state_dict)

        if isinstance(model, FSDP):
            model._assert_state(TrainingState.IDLE)
        return loss.detach()  # type: ignore[possibly-undefined]

    def _test_fsdp_parity(
        self,
        model_class: Type[FSDPTestModel],
        fsdp_init_mode: FSDPInitMode,
        cuda_init_mode: CUDAInitMode,
        ref_init_fn: Optional[Callable] = None,
        num_iters: int = 2,
        save_model: bool = True,
        cpu_offload: CPUOffload = CPUOffload(),
        backward_prefetch: Optional[BackwardPrefetch] = None,
        sharding_strategy: Optional[ShardingStrategy] = None,
        mixed_precision: Optional[MixedPrecision] = None,
        forward_prefetch: bool = False,
        use_orig_params: bool = False,
        enable_sharded_grad_scaler: bool = False,
        use_pure_fp16: bool = False,
        init_kwargs: Optional[Dict[str, Any]] = None,
        sharded_grad_scaler_kwargs: Optional[Dict[str, Any]] = None,
        **fsdp_kwargs,
    ):
        """
        Tests FSDP training against a reference, which defaults to DDP but
        may be customized with ``ref_init_fn``.

        Args:
            model_class (Type[FSDPTestModel]): A model class that inherits from
                ``FSDPTestModel``, which defines the expected interface.
            fsdp_init_mode (FSDPInitMode): The mode to initialize the
                FSDP-wrapped model. This should not be ``NO_FSDP``.
            ref_init_fn (Optional[Callable]): A callable to invoke that wraps a
                non-wrapped model to construct the reference model, where this
                wrapper should provide data parallel semantics. If ``None``,
                then the callable defaults to the DDP constructor.
        """
        assert (
            fsdp_init_mode != FSDPInitMode.NO_FSDP
        ), "Expects an FSDP init mode that wraps with FSDP"
        if init_kwargs is None:
            init_kwargs = {}
        lr = 1e-2
        rank = self.process_group.rank()
        # Establish reference behavior with DDP
        model = model_class.init(
            self.process_group,
            FSDPInitMode.NO_FSDP,
            CUDAInitMode.CUDA_BEFORE,
            deterministic=True,
            **init_kwargs,
        )
        if ref_init_fn is None:
            ref_model = DDP(model, device_ids=[rank], output_device=rank)
        else:
            ref_model = ref_init_fn(model)
        if use_pure_fp16:
            ref_model = ref_model.half()
        ref_loss = self._train_for_several_steps(
            ref_model,
            num_iters,
            autocast=mixed_precision is not None,
            lr=lr,
            fsdp_cpu_offload=cpu_offload,
            mixed_precision=mixed_precision,
            enable_sharded_grad_scaler=enable_sharded_grad_scaler,
            use_pure_fp16=use_pure_fp16,
            sharded_grad_scaler_kwargs=sharded_grad_scaler_kwargs,
        )
        ddp_params = list(ref_model.parameters())
        # Check against FSDP behavior
        fsdp_kwargs.update(
            {
                "cpu_offload": cpu_offload,
                "backward_prefetch": backward_prefetch,
                "sharding_strategy": sharding_strategy,
                "mixed_precision": mixed_precision,
                "forward_prefetch": forward_prefetch,
                "use_orig_params": use_orig_params,
            }
        )
        try:
            fsdp_model = model_class.init(
                self.process_group,
                fsdp_init_mode,
                cuda_init_mode,
                fsdp_kwargs,
                deterministic=True,
                **init_kwargs,
            )
        except Exception as e:
            raise ValueError(f"Initializing {model_class} raised error {str(e)}") from e
        if not isinstance(fsdp_model, FSDP):
            # Enforce that we wrap with top-level FSDP since we are comparing
            # assuming a data parallel reference and some test models may not
            # do so in their `init()` method
            fsdp_model = FSDP(fsdp_model, self.process_group, **fsdp_kwargs)
        if use_pure_fp16:
            # Change the model parameter dtype after FSDP initialization
            fsdp_model = fsdp_model.half()
        if cuda_init_mode == CUDAInitMode.CUDA_AFTER:
            fsdp_model = fsdp_model.cuda()
        offload_params = cpu_offload is not None and cpu_offload.offload_params
        # Offloading parameters with `CUDA_AFTER` should raise an error during
        # lazy initialization due to the parameter devices not being CPU;
        # otherwise, all parameter devices should be CPU
        expects_device_error = (
            offload_params and cuda_init_mode == CUDAInitMode.CUDA_AFTER
        )
        expects_cpu_device = (
            offload_params and cuda_init_mode != CUDAInitMode.CUDA_AFTER
        )
        if expects_cpu_device:
            cpu_device = torch.device("cpu")
            for param in fsdp_model.parameters():
                self.assertEqual(param.device, cpu_device)
        context = (
            self.assertRaisesRegex(
                RuntimeError,
                "An FSDP-managed module with parameter CPU offloading enabled "
                "has parameters on cuda",
            )
            if expects_device_error
            else nullcontext()
        )
        with context:
            fsdp_loss = self._train_for_several_steps(
                fsdp_model,
                num_iters,
                autocast=False,
                lr=lr,
                fsdp_cpu_offload=cpu_offload,
                save_model=save_model,
                mixed_precision=mixed_precision,
                enable_sharded_grad_scaler=enable_sharded_grad_scaler,
                use_pure_fp16=use_pure_fp16,
                sharded_grad_scaler_kwargs=sharded_grad_scaler_kwargs,
            )
        # No need to check for parameter and loss parity if expecting an error
        if expects_device_error:
            return
        # Check parameter devices are CPU if offloading to CPU before calling
        # `get_full_params()`, which will cast the parameters to FP32
        if offload_params:
            cpu_device = torch.device("cpu")
            for param in fsdp_model.parameters():
                self.assertEqual(param.device, cpu_device)
            fsdp_loss = fsdp_loss.cuda()
        fsdp_unsharded_params = get_full_params(fsdp_model)
        # Do not check dtype since the reference DDP loss may not be the same
        # dtype as the FSDP loss in the case of mixed precision
        torch.testing.assert_close(ref_loss, fsdp_loss, check_dtype=False)
        # Do not check for parameter parity if using mixed precision since (1)
        # the DDP parameters are in FP16 (from `half()`) while the FSDP
        # parameters are in FP32 (from `summon_full_params()`) and (2) DDP runs
        # the optimizer in FP16 while FSDP runs it in FP32
        # TODO: Disable checking the parameters for pure FP16 due to floating
        # point inaccuracy. Note that this means that the backward pass is not
        # checked: https://github.com/pytorch/pytorch/issues/90784
        if mixed_precision is None and not use_pure_fp16:
            self.assertEqual(
                ddp_params,
                fsdp_unsharded_params,
                exact_device=True,
                msg="FSDP did not match DDP",
            )
1447
+
1448
+
1449
def test_compiled_fsdp(compile_compute_on_module: Optional[type] = None):
    """Decorator factory that runs the wrapped test once per ``FullyShardMode``:
    plain eager ``fully_shard`` and ``fully_shard`` followed by compiling the
    module's compute. If ``compile_compute_on_module`` is given, only modules
    of that type are compiled."""

    def fully_shard_with_compiled_compute(*args, **kwargs):
        # Apply fully_shard normally, then compile the (first-arg) module
        torch.distributed._composable.fsdp.fully_shard(*args, **kwargs)  # type: ignore[operator]
        if compile_compute_on_module is None or isinstance(
            args[0], compile_compute_on_module
        ):
            args[0].compile()

    class FullyShardMode(Enum):
        EAGER = auto()
        COMPILED_COMPUTE = auto()

    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            original_fully_shard = torch.distributed._composable.fsdp.fully_shard
            for mode in FullyShardMode:
                if mode != FullyShardMode.EAGER and not has_triton():
                    warnings.warn("Inductor on GPU needs Triton and recent GPU arch")
                    continue
                # barrier to ensure thread reading the same value
                original_skip_fsdp_hooks = torch._dynamo.config.skip_fsdp_hooks
                original_compile_threads = torch._inductor.config.compile_threads
                torch.distributed.barrier()

                if mode == FullyShardMode.EAGER:
                    fully_shard_patch = original_fully_shard
                elif mode == FullyShardMode.COMPILED_COMPUTE:
                    torch._dynamo.config.skip_fsdp_hooks = True
                    torch._inductor.config.compile_threads = 1
                    fully_shard_patch = fully_shard_with_compiled_compute  # type: ignore[assignment]
                else:
                    raise NotImplementedError(
                        f"Need to implement FullyShardMode={mode}"
                    )

                # fully_shard is imported as a global
                # through `from ... import fully_shard`
                func.__globals__[original_fully_shard.__name__] = fully_shard_patch
                func(*args, **kwargs)
                # other threads use patched func before this thread restores
                torch.distributed.barrier()
                func.__globals__[original_fully_shard.__name__] = original_fully_shard
                torch._dynamo.config.skip_fsdp_hooks = original_skip_fsdp_hooks
                torch._inductor.config.compile_threads = original_compile_threads

        return wrapper

    return decorator
1498
+
1499
+
1500
class SkipModule(nn.Module):
    """A single bias-free 10x10 linear layer; used by :class:`SkipModel` as
    the submodule that is not wrapped with FSDP."""

    def __init__(self) -> None:
        super().__init__()
        self.lin = nn.Linear(10, 10, bias=False)

    def forward(self, x):
        return self.lin(x)
1507
+
1508
+
1509
class NestedLinear(nn.Module):
    """Holds one CUDA linear layer, optionally wrapped with the auto-wrap
    ``wrap()`` helper (``fsdp_wrap=True``)."""

    def __init__(self, fsdp_wrap):
        super().__init__()
        linear = nn.Linear(10, 10, bias=False).cuda()
        self.nested_linear = wrap(linear) if fsdp_wrap else linear

    def forward(self, x):
        return self.nested_linear(x)
1519
+
1520
+
1521
class SkipModel(nn.Module):
    """Model mixing wrapped and unwrapped submodules: a plain linear, an
    unwrapped :class:`SkipModule`, and a ``wrap()``-ed :class:`NestedLinear`
    (doubly nested when ``double_nest=True``)."""

    def __init__(self, double_nest):
        super().__init__()
        self.linear = nn.Linear(10, 10, bias=False).cuda()
        self.linear_skip = SkipModule().cuda()
        self.nested_linear = wrap(NestedLinear(fsdp_wrap=double_nest))

    def forward(self, x):
        # Apply the three stages in sequence.
        for stage in (self.linear, self.linear_skip, self.nested_linear):
            x = stage(x)
        return x
infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/common_jit.py ADDED
@@ -0,0 +1,323 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: ignore-errors
2
+
3
+ # Torch
4
+ import torch
5
+ import torch.cuda
6
+ import torch.jit
7
+ import torch.jit._logging
8
+ import torch.jit.frontend
9
+ import torch.jit.quantized
10
+
11
+ # Testing utils
12
+ from torch.testing._internal.common_dtype import floating_and_complex_types_and
13
+ from torch.testing._internal.common_utils import TestCase, \
14
+ freeze_rng_state, TemporaryFileName, enable_profiling_mode_for_profiling_tests, is_iterable_of_tensors
15
+ from torch.testing._internal.common_utils import enable_profiling_mode # noqa: F401
16
+
17
+ # Standard library
18
+ from itertools import chain
19
+ from typing import List, Union
20
+ from torch._C import TensorType
21
+
22
+ import io
23
+
24
def check_output_types(self, func, ref_outputs, args, kwargs):
    """Assert that ``func``'s last JIT graph has exactly one output whose
    recorded type is consistent with the eager reference outputs."""
    graph = getattr(func, 'last_graph', None)
    output_types = [out.type() for out in graph.outputs()]
    self.assertTrue(len(output_types) == 1)
    torch._C._jit_assert_is_instance(ref_outputs, output_types[0])
30
+
31
# Test names in this set are only checked for a single derivative
nn_functional_single_grad = frozenset(
    f'test_nn_{name}'
    for name in (
        'pdist',
        'multilabel_margin_loss',
        'max_unpool3d',
        'multi_margin_loss',
        'binary_cross_entropy',
        'binary_cross_entropy_size_average',
        'ctc_loss',
        'grid_sample',
    )
)
42
+
43
def check_against_reference(self, func, reference_func, output_func, args, kwargs=None,
                            allow_unused=True, check_types=True, no_grad=False, no_gradgrad=False):
    """Verifies a function performs identically to some reference implementation.

    Commonly, this is used to verify that a JIT implementation
    (output_func) matches the behavior of the eager implementation
    (reference_func).

    Args:
        func: the implementation under test (e.g. a scripted/traced function).
        reference_func: the trusted eager implementation.
        output_func: post-processing applied to outputs before grad checks.
        args: positional inputs; tensors (and tensor lists) are cloned per run.
        kwargs: keyword inputs, passed through unchanged.
        allow_unused: forwarded to ``torch.autograd.grad``.
        check_types: also validate the JIT graph's recorded output type.
        no_grad: skip all gradient checks.
        no_gradgrad: skip only the second-derivative check.
    """
    kwargs = kwargs if kwargs else {}

    # Weighted sum over all differentiable (floating/complex) outputs; the
    # (i + 1) weight makes each output contribute distinctly to the scalar loss.
    def allSum(vs):
        if isinstance(vs, torch.Tensor):
            vs = (vs,)
        return sum((i + 1) * v.sum().abs() if v.dtype.is_complex else (i + 1) * v.sum()
                   for i, v in enumerate(vs)
                   if v is not None and v.dtype in floating_and_complex_types_and(torch.half, torch.bfloat16))

    # Detached copy so reference and test runs never share autograd graphs.
    def clone_tensor(t, preserve_requires_grad):
        require_grad = preserve_requires_grad and t.requires_grad
        return t.detach().clone().requires_grad_(require_grad)

    def clone_inputs(preserve_requires_grad: bool):
        inputs: List[Union[torch.Tensor, List[torch.Tensor]]] = []

        for arg in args:
            if isinstance(arg, torch.Tensor):
                inputs.append(clone_tensor(arg, preserve_requires_grad))
            elif is_iterable_of_tensors(arg):
                inputs.append([clone_tensor(t, preserve_requires_grad) for t in arg])
            else:
                inputs.append(arg)

        return inputs

    # Returns tensors in args that requires_grad, including tensors in TensorList args
    def get_recording_tensors(args):
        recording_tensors: List[torch.Tensor] = []

        for arg in args:
            if isinstance(arg, torch.Tensor) and arg.requires_grad:
                recording_tensors.append(arg)
            elif is_iterable_of_tensors(arg):
                recording_tensors.extend(filter(lambda t: t.requires_grad, arg))

        return recording_tensors

    # test no gradients case
    # runAndSaveRNG restores RNG state around each call so both runs see
    # identical random streams.
    nograd_inputs = clone_inputs(preserve_requires_grad=False)
    outputs = self.runAndSaveRNG(reference_func, nograd_inputs, kwargs)
    with enable_profiling_mode_for_profiling_tests():
        outputs_test = self.runAndSaveRNG(func, nograd_inputs, kwargs)
    self.assertEqual(outputs, outputs_test)

    if check_types:
        check_output_types(self, func, outputs_test, nograd_inputs, kwargs)

    if no_grad:
        # skip grad tests
        return

    with enable_profiling_mode_for_profiling_tests():
        # test single grad case
        recording_inputs = clone_inputs(preserve_requires_grad=True)
        recording_tensors = get_recording_tensors(recording_inputs)
        outputs = output_func(self.runAndSaveRNG(reference_func, recording_inputs, kwargs))
        grads = torch.autograd.grad(allSum(outputs), recording_tensors,
                                    allow_unused=allow_unused)
        outputs_test = output_func(self.runAndSaveRNG(func, recording_inputs, kwargs))
        grads_test = torch.autograd.grad(allSum(outputs_test), recording_tensors,
                                         allow_unused=allow_unused)
        self.assertEqual(outputs, outputs_test)
        self.assertEqual(grads, grads_test)
        # test the grad grad case
        # Ops in nn_functional_single_grad only support a first derivative.
        if self._testMethodName in nn_functional_single_grad or no_gradgrad:
            return

        outputs = output_func(self.runAndSaveRNG(reference_func, recording_inputs, kwargs))
        l1 = allSum(outputs)
        grads = torch.autograd.grad(l1, recording_tensors, create_graph=True,
                                    allow_unused=allow_unused)

        # l2 couples first-order grads with the loss so differentiating it
        # exercises the double-backward path.
        l2 = (allSum(grads) * l1)
        grads2 = torch.autograd.grad(l2, recording_tensors, allow_unused=allow_unused)
        # Fresh clones for the test run, mirroring the reference run above.
        recording_inputs = clone_inputs(preserve_requires_grad=True)
        recording_tensors = get_recording_tensors(recording_inputs)
        outputs_test = output_func(self.runAndSaveRNG(func, recording_inputs, kwargs))
        l1_test = allSum(outputs_test)
        grads_test = torch.autograd.grad(
            l1_test, recording_tensors, create_graph=True, allow_unused=allow_unused)

        l2_test = (allSum(grads_test) * l1_test)
        grads2_test = torch.autograd.grad(l2_test, recording_tensors, allow_unused=allow_unused)

        self.assertEqual(outputs, outputs_test)
        self.assertEqual(grads, grads_test)
        # Second derivatives are numerically noisier, hence looser tolerances.
        for g2, g2_test in zip(grads2, grads2_test):
            if g2 is None and g2_test is None:
                continue
            self.assertEqual(g2, g2_test, atol=5e-4, rtol=1e-4)
142
+
143
class JitCommonTestCase(TestCase):
    """Shared helpers for JIT tests: graph-to-function conversion, save/load
    round-trips under fixed RNG, autodiff-grouping assertions, and symbolic
    shape-analysis checks."""

    def createFunctionFromGraph(self, trace):
        """Wrap a graph (or an object exposing ``.graph()``) into a callable
        ScriptFunction named ``forward``."""
        graph = trace if isinstance(trace, torch._C.Graph) else trace.graph()
        return torch._C._create_function_from_graph("forward", graph)

    def assertExportImport(self, trace, inputs):
        """Round-trip a traced graph through save/load and compare outputs."""
        m = self.createFunctionFromGraph(trace)
        self.assertExportImportModule(m, inputs)

    def assertExportImportModule(self, m, inputs):
        """Assert that ``m`` and its exported/imported copy produce equal
        results on ``inputs`` (RNG state is frozen for both runs)."""
        m_import = self.getExportImportCopy(m)
        a = self.runAndSaveRNG(m, inputs)
        b = self.runAndSaveRNG(m_import, inputs)
        self.assertEqual(a, b, "Results of original model and "
                               "exported/imported version of model differed")

    def runAndSaveRNG(self, func, inputs, kwargs=None):
        """Call ``func`` with the RNG state frozen so repeated calls see the
        same random stream."""
        kwargs = kwargs if kwargs else {}
        with freeze_rng_state():
            results = func(*inputs, **kwargs)
        return results

    def getExportImportCopy(self, m, also_test_file=True, map_location=None):
        """Save/load ``m`` through an in-memory buffer, and optionally through
        a temporary file as well, returning the re-imported module."""
        buffer = io.BytesIO()
        torch.jit.save(m, buffer)
        buffer.seek(0)
        imported = torch.jit.load(buffer, map_location=map_location)

        if not also_test_file:
            return imported

        # Also exercise the file-path save/load code path.
        with TemporaryFileName() as fname:
            torch.jit.save(imported, fname)
            return torch.jit.load(fname, map_location=map_location)

    def autoDiffErrorMessage(self, should_autodiff_node, nodes_not_in_diff_graph,
                             fusion_nodes_not_found, non_fusible_nodes_being_fused,
                             fusion_nodes_found, nodes_in_diff_graph):
        """Build a detailed failure message explaining which nodes were (or
        were not) placed in DifferentiableGraphs / FusionGroups versus what the
        test expected."""
        err_msg = "\nFailure in testing nodes' autodifferentiation. "
        if should_autodiff_node:
            err_msg += "One or more nodes were expected to be autodiffed, " \
                       "but were not found in specified fusible/nonfusible " \
                       "DifferentiableGraph groups. \nSpecifically:"
            # The node is intended to appear in a differentiable graph but doesn't
            diff_nodes_missing = []
            # The node is intended to appear in a differentiable graph
            # outside of a fusion group but instead is in a fusion group
            diff_nodes_in_fusion = []
            # The node is intended to appear in a fusion group but doesn't
            fusion_nodes_missing = []
            # The node is intended to appear in a fusion group but instead
            # is just in an outer differentiable graph
            fusion_nodes_in_diff = []
            for node in nodes_not_in_diff_graph:
                if node in non_fusible_nodes_being_fused:
                    diff_nodes_in_fusion.append(node)
                else:
                    diff_nodes_missing.append(node)
            for node in fusion_nodes_not_found:
                if node in nodes_in_diff_graph:
                    fusion_nodes_in_diff.append(node)
                else:
                    fusion_nodes_missing.append(node)
            if len(diff_nodes_missing) > 0:
                err_msg += f"\n  {diff_nodes_missing} were not in one of the " \
                           "DifferentiableGraphs when they were expected to be. " \
                           "Did you intend for these nodes to be autodiffed? " \
                           "If not, remove them from the list of nonfusible nodes."
            if len(diff_nodes_in_fusion) > 0:
                err_msg += f"\n  {diff_nodes_in_fusion} were found in one of the FusionGroups " \
                           "when they were expected to be just in a DifferentiableGraph. If it was " \
                           "intended for these nodes to be in FusionGroups, reclassify these nodes as " \
                           "fusible nodes. If these nodes were not intended to be fused, your " \
                           "autodifferentiation logic might be wrong."
            if len(fusion_nodes_missing) > 0:
                err_msg += f"\n  {fusion_nodes_missing} were not in one of the FusionGroups " \
                           "of the DifferentiableGraphs when they were expected to be. " \
                           "They were also not found in an outer DifferentiableGraph. Did you " \
                           "intend for these nodes to be autodifferentiated? If not, you should " \
                           "remove these nodes from the test's fusible nodes. Otherwise your " \
                           "autodifferentiation logic might be wrong."
            if len(fusion_nodes_in_diff) > 0:
                err_msg += f"\n  {fusion_nodes_in_diff} were not in one of the FusionGroups " \
                           "of the DifferentiableGraphs when they were expected to be, " \
                           "instead they were found just in an outer DifferentiableGraph. " \
                           "Did you intend for these nodes to be fused? If not, you should " \
                           "move these nodes into the test's nonfusible nodes. Otherwise your " \
                           "autodifferentiation logic might be wrong."
        else:
            err_msg += "One or more nodes were not expected to be autodiffed " \
                       "but were found in a DifferentiableGraph or in a FusionGroup " \
                       "of a DifferentiableGraph. Did you intend for these nodes to be " \
                       "autodiffed? If so, change this test to expect autodifferentiation. " \
                       "\nSpecifically:"
            if len(fusion_nodes_found) > 0:
                err_msg += f"\n  {fusion_nodes_found} were not expected to be in " \
                           "one of the DifferentiableGraphs, but appeared in a FusionGroup " \
                           "of a DifferentiableGraph. "
            if len(nodes_in_diff_graph) > 0:
                err_msg += f"\n  {nodes_in_diff_graph} were not expected to " \
                           "be in one of the DifferentiableGraphs but were."
        return err_msg

    def assertAutodiffNode(self, graph, should_autodiff_node, nonfusible_nodes, fusible_nodes):
        """Assert that ``graph`` groups ``nonfusible_nodes`` into
        DifferentiableGraphs and ``fusible_nodes`` into FusionGroups exactly
        when ``should_autodiff_node`` is True."""
        diff_nodes = graph.findAllNodes('prim::DifferentiableGraph')
        diff_subgraphs = [node.g('Subgraph') for node in diff_nodes]

        # Note: currently no tests have fusible_nodes
        fusion_nodes = list(chain.from_iterable([g.findAllNodes('prim::FusionGroup') for g in diff_subgraphs]))
        fusion_subgraphs = [node.g('Subgraph') for node in fusion_nodes]

        # For any non-fusible node, it must show up in one of the DifferentiableGraphs.
        nodes_in_diff_graph = []
        nodes_not_in_diff_graph = []
        non_fusible_nodes_being_fused = []
        for node in nonfusible_nodes:
            if any(g.findNode(node) is not None for g in diff_subgraphs):
                nodes_in_diff_graph.append(node)
            else:
                nodes_not_in_diff_graph.append(node)
            if any(g.findNode(node) is not None for g in fusion_subgraphs):
                non_fusible_nodes_being_fused.append(node)
        found_all_nonfusible_nodes = len(nodes_in_diff_graph) == len(nonfusible_nodes)

        # For any fusible node, it must show up in one of the FusionGroups in one of the DifferentiableGraphs.
        fusion_nodes_found = []
        fusion_nodes_not_found = []
        for node in fusible_nodes:
            if any(g.findNode(node) is not None for g in fusion_subgraphs):
                fusion_nodes_found.append(node)
            else:
                fusion_nodes_not_found.append(node)
        found_all_fusible_nodes = len(fusion_nodes_found) == len(fusible_nodes)

        if should_autodiff_node is not None:
            err_msg = self.autoDiffErrorMessage(should_autodiff_node,
                                                nodes_not_in_diff_graph,
                                                fusion_nodes_not_found,
                                                non_fusible_nodes_being_fused,
                                                fusion_nodes_found,
                                                nodes_in_diff_graph)
            self.assertEqual(should_autodiff_node,
                             found_all_nonfusible_nodes and found_all_fusible_nodes, err_msg)

    def checkShapeAnalysis(self, out_sizes: Union[List[int], List[List[int]]],
                           traced_graph, assert_propagation, constant_prop=True):
        """Re-run symbolic shape propagation on ``traced_graph`` and check the
        inferred output sizes against ``out_sizes`` (a size list, or one list
        per tuple element when the graph returns a tuple)."""
        # repropagte input shapes provided by tracing,
        prev_symbolic_shapes_test_enabled = torch._C._jit_symbolic_shapes_test_mode_enabled()
        for enable_test_mode in [True, False]:
            # here we are testing allowing/disallowing substituting in complete shapes as constants,
            # disallowing constants helps stress test partial eval and substitution pipeline
            torch._C._jit_set_symbolic_shapes_test_mode(enable_test_mode)
            torch._C._jit_erase_non_input_shape_information(traced_graph)
            if constant_prop:
                torch._C._jit_pass_constant_propagation(traced_graph)
            torch._C._jit_pass_propagate_shapes_on_graph(traced_graph)
            # Add sizes to default tensor type to avoid checking something out of scope
            # and difficulties with tracer leaving in other parts of tensor type
            output = next(traced_graph.outputs()).type()

            def test_type(type, actual_size):
                sizes = type.symbolic_sizes()
                out_type = TensorType.get().with_sizes(sizes)
                actual_type = TensorType.get().with_sizes(actual_size)

                # always check actual shape is a subtype of the output
                self.assertTrue(actual_type.isSubtypeOf(out_type))

                # and then if assertion flag is provided, check shape analysis
                # is successful
                if assert_propagation:
                    self.assertEqual(out_type.sizes(), actual_size)

            if output.isSubtypeOf(torch._C.TensorType.get()):
                test_type(output, out_sizes)
            else:
                tuple_elements = output.elements()
                for i in range(len(tuple_elements)):
                    test_type(tuple_elements[i], out_sizes[i])

        torch._C._jit_set_symbolic_shapes_test_mode(prev_symbolic_shapes_test_enabled)
infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/common_methods_invocations.py ADDED
The diff for this file is too large to render. See raw diff
 
infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/common_modules.py ADDED
The diff for this file is too large to render. See raw diff
 
infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/common_quantized.py ADDED
@@ -0,0 +1,227 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: ignore-errors
2
+
3
+ r"""Importing this file includes common utility methods for checking quantized
4
+ tensors and modules.
5
+ """
6
+ import numpy as np
7
+ import torch
8
+ from contextlib import contextmanager
9
+ from torch.testing._internal.common_utils import TEST_WITH_ASAN, TEST_WITH_TSAN, TEST_WITH_UBSAN, IS_PPC, IS_MACOS, IS_WINDOWS
10
+
11
# Quantized-engine backends available in this build, with 'none' excluded.
supported_qengines = torch.backends.quantized.supported_engines
supported_qengines.remove('none')
# Note: We currently do not run QNNPACK tests on WINDOWS and MACOS as it is flaky. Issue #29326
# QNNPACK is not supported on PPC
# QNNPACK throws ASAN heap-buffer-overflow error.
_qnnpack_blocked = any([IS_PPC, TEST_WITH_ASAN, TEST_WITH_TSAN, TEST_WITH_UBSAN, IS_MACOS, IS_WINDOWS])
if 'qnnpack' in supported_qengines and _qnnpack_blocked:
    supported_qengines.remove('qnnpack')
18
+
19
+ def _conv_output_shape(input_size, kernel_size, padding, stride, dilation,
20
+ output_padding=0):
21
+ """Computes the output shape given convolution parameters."""
22
+ return np.floor((input_size + 2 * padding - kernel_size - (kernel_size - 1)
23
+ * (dilation - 1)) / stride) + 2 * output_padding + 1
24
+
25
+ # Quantization references
26
+ def _quantize(x, scale, zero_point, qmin=None, qmax=None, dtype=np.uint8):
27
+ """Quantizes a numpy array."""
28
+ if qmin is None:
29
+ qmin = np.iinfo(dtype).min
30
+ if qmax is None:
31
+ qmax = np.iinfo(dtype).max
32
+ qx = np.round(x / scale + zero_point).astype(np.int64)
33
+ qx = np.clip(qx, qmin, qmax)
34
+ qx = qx.astype(dtype)
35
+ return qx
36
+
37
+
38
+ def _dequantize(qx, scale, zero_point):
39
+ """Dequantizes a numpy array."""
40
+ x = (qx.astype(float) - zero_point) * scale
41
+ return x
42
+
43
+
44
+ def _requantize(x, multiplier, zero_point, qmin=0, qmax=255, qtype=np.uint8):
45
+ """Requantizes a numpy array, i.e., intermediate int32 or int16 values are
46
+ converted back to given type"""
47
+ qx = (x * multiplier).round() + zero_point
48
+ qx = np.clip(qx, qmin, qmax).astype(qtype)
49
+ return qx
50
+
51
def _calculate_dynamic_qparams(X, dtype, reduce_range=False, qscheme=torch.per_tensor_affine):
    """Calculate the dynamic quantization parameters (scale, zero_point)
    according to the min and max element of the tensor

    Returns ``[scale, zero_point]`` as a plain ``[float, int]`` pair.
    ``reduce_range`` halves the quantized range (used by some fbgemm kernels).
    """
    assert qscheme in (torch.per_tensor_affine, torch.per_tensor_symmetric)
    if qscheme == torch.per_tensor_symmetric:
        # Symmetric quantization is only defined here for signed int8.
        assert dtype == torch.qint8
    if isinstance(X, torch.Tensor):
        X = X.numpy()
    # Pick the quantized integer range for the target dtype.
    if dtype == torch.qint8:
        if reduce_range:
            qmin, qmax = -64, 63
        else:
            qmin, qmax = -128, 127
    else:  # dtype == torch.quint8
        if reduce_range:
            qmin, qmax = 0, 127
        else:
            qmin, qmax = 0, 255
    min_val = X.min()
    max_val = X.max()
    is_symmetric = (qscheme == torch.per_tensor_symmetric)
    if min_val == max_val:
        # Degenerate (constant) tensor: any scale works; use 1.0 / 0.
        scale = 1.0
        zero_point = 0
    else:
        if is_symmetric:
            # Symmetric: range is centered on zero, zero_point fixed at 0.
            max_val = max(max_val, -min_val)
            min_val = -max_val
            scale = (max_val - min_val) / (qmax - qmin)
            scale = max(scale, np.finfo(np.float32).eps)
            zero_point = 0
        else:
            # Affine: the range must include 0 so that 0.0 is representable.
            max_val = max(max_val, 0.0)
            min_val = min(min_val, 0.0)
            scale = (max_val - min_val) / (qmax - qmin)
            scale = max(scale, np.finfo(np.float32).eps)
            # Clamp zero_point into the quantized range.
            zero_point = qmin - round(min_val / scale)
            zero_point = max(qmin, zero_point)
            zero_point = min(qmax, zero_point)
    return [float(scale), int(zero_point)]
91
+
92
def _calculate_dynamic_per_channel_qparams(X, dtype):
    """Calculate the dynamic quantization parameters (scale, zero_point)
    according to the min and max element of the tensor

    Returns per-channel ``(scale, zero_point)`` arrays of length ``X.shape[0]``.
    """
    if isinstance(X, torch.Tensor):
        X = X.numpy()
    qmin, qmax = torch.iinfo(dtype).min, torch.iinfo(dtype).max
    n_levels = qmax - qmin
    scale = np.zeros(X.shape[0], dtype=np.float64)
    zero_point = np.zeros(X.shape[0], dtype=np.int64)
    for i in range(zero_point.shape[0]):
        # NOTE(review): this uses the whole-tensor X.min()/X.max() inside the
        # per-channel loop rather than X[i].min()/X[i].max(), so every channel
        # gets identical qparams — presumably intentional for these tests, but
        # confirm against callers before relying on true per-channel behavior.
        min_val = X.min()
        max_val = X.max()
        if min_val == max_val:
            # Degenerate (constant) data: any scale works; use 1.0 / 0.
            scale[i] = 1.0
            zero_point[i] = 0
        else:
            # Affine: the range must include 0 so that 0.0 is representable.
            max_val = max(max_val, 0.0)
            min_val = min(min_val, 0.0)
            scale[i] = (max_val - min_val) / n_levels
            scale[i] = max(scale[i], np.finfo(np.float32).eps)
            # Clamp zero_point into the quantized range.
            zero_point[i] = qmin - round(min_val / scale[i])
            zero_point[i] = max(qmin, zero_point[i])
            zero_point[i] = min(qmax, zero_point[i])

    return scale, zero_point
117
+
118
+ def _snr(x, x_hat):
119
+ """Calculates the signal to noise ratio and returns the signal and noise
120
+ power, as well as the SNR in dB.
121
+ If the input is a list/tuple this function is called recursively on each
122
+ element. The result will have the same nested structure as the inputs.
123
+
124
+ Args:
125
+ x, x_hat: Either a tensor or a nested list/tuple of tensors.
126
+ Returns:
127
+ signal, noise, SNR(in dB): Either floats or a nested list of floats
128
+ """
129
+ if isinstance(x, (list, tuple)):
130
+ assert len(x) == len(x_hat)
131
+ res = []
132
+ for idx in range(len(x)):
133
+ res.append(_snr(x[idx], x_hat[idx]))
134
+ return res
135
+ if x_hat.is_quantized:
136
+ x_hat = x_hat.dequantize()
137
+ if x.is_quantized:
138
+ x = x.dequantize()
139
+ noise = (x - x_hat).norm()
140
+ if noise == 0:
141
+ return 0.0, float('inf'), float('inf')
142
+ signal = x.norm()
143
+ snr = signal / noise
144
+ snr_db = 20 * snr.log10()
145
+ return signal, noise, snr_db
146
+
147
@contextmanager
def override_quantized_engine(qengine):
    """Context manager that temporarily switches the active quantized engine,
    restoring the previous one on exit even if the body raises."""
    saved_engine = torch.backends.quantized.engine
    torch.backends.quantized.engine = qengine
    try:
        yield
    finally:
        torch.backends.quantized.engine = saved_engine
155
+
156
@contextmanager
def override_cpu_allocator_for_qnnpack(qengine_is_qnnpack):
    """Context manager that installs the mobile CPU allocator while the qnnpack
    engine is in use, and uninstalls it on exit.

    When ``qengine_is_qnnpack`` is False this is a no-op wrapper.
    """
    try:
        if qengine_is_qnnpack:
            torch._C._set_default_mobile_cpu_allocator()
        yield
    finally:
        # Unset is guarded by the same flag, so a no-op enter gets a no-op exit.
        if qengine_is_qnnpack:
            torch._C._unset_default_mobile_cpu_allocator()
165
+
166
# TODO: Update all quantization tests to use this decorator.
# Currently for some of the tests it seems to have inconsistent params
# for fbgemm vs qnnpack.
def override_qengines(qfunction):
    """Decorator that reruns ``qfunction`` once per engine in
    ``supported_qengines``, each run under ``override_quantized_engine`` so the
    active engine is restored afterwards.

    Fix over the previous version: the wrapper now carries the wrapped
    function's metadata (``__name__``, ``__doc__``) via ``functools.wraps`` so
    test discovery and failure reports show the real test name instead of
    ``test_fn``.
    """
    import functools  # local import: keeps the module's import surface unchanged

    @functools.wraps(qfunction)
    def test_fn(*args, **kwargs):
        for qengine in supported_qengines:
            with override_quantized_engine(qengine):
                # qfunction should not return anything.
                qfunction(*args, **kwargs)
    return test_fn
176
+
177
def _qengine_matches(name):
    # Single point of comparison against the currently active backend.
    return torch.backends.quantized.engine == name

def qengine_is_fbgemm():
    """True iff the active quantized engine is fbgemm."""
    return _qengine_matches('fbgemm')

def qengine_is_qnnpack():
    """True iff the active quantized engine is qnnpack."""
    return _qengine_matches('qnnpack')

def qengine_is_onednn():
    """True iff the active quantized engine is onednn."""
    return _qengine_matches('onednn')

def qengine_is_x86():
    """True iff the active quantized engine is x86."""
    return _qengine_matches('x86')
185
+
186
+ # Helper function used to simulate per-channel fake-quant against any axis
187
+ def _permute_to_axis_zero(X, axis):
188
+ new_axis_list = list(range(X.dim()))
189
+ new_axis_list[axis] = 0
190
+ new_axis_list[0] = axis
191
+ y = X.permute(tuple(new_axis_list))
192
+ return y, new_axis_list
193
+
194
+ # Reference method for fake quantize
195
+ # Note: because scale/zero_point are left as float in the actual kernel, this mimics how fake_quant works for float16/64
196
+ def _fake_quantize_per_channel_affine_reference(X, per_channel_scale, per_channel_zero_point, axis, quant_min, quant_max):
197
+ dtype = X.dtype
198
+ X, permute_axis_list = _permute_to_axis_zero(X.to(torch.float32), axis)
199
+ res = torch.zeros_like(X)
200
+
201
+ for i in range(X.size()[0]):
202
+ res[i] = (torch.clamp(torch.round(X[i] * (1.0 / per_channel_scale[i]) +
203
+ per_channel_zero_point[i]), quant_min, quant_max) - per_channel_zero_point[i]) * per_channel_scale[i]
204
+
205
+ out = res.permute(tuple(permute_axis_list))
206
+ return out.to(dtype)
207
+
208
+ # Reference method for the gradient of the fake quantize operator
209
+ # Note: because scale/zero_point are left as float in the actual kernel, this mimics how fake_quant works for float16/64
210
+ def _fake_quantize_per_channel_affine_grad_reference(dY, X, per_channel_scale, per_channel_zero_point, axis, quant_min, quant_max):
211
+ dtype = X.dtype
212
+ X, permute_axis_list = _permute_to_axis_zero(X.to(torch.float32), axis)
213
+ Xq = torch.zeros_like(X)
214
+ for i in range(X.size()[0]):
215
+ Xq[i] = torch.round(X[i] * (1.0 / per_channel_scale[i]) + per_channel_zero_point[i])
216
+ Xq = Xq.permute(tuple(permute_axis_list))
217
+ mask = (Xq >= quant_min) * (Xq <= quant_max)
218
+ res = torch.zeros_like(dY)
219
+ res[mask] = dY[mask]
220
+ return res.to(dtype)
221
+
222
def to_tensor(X, device):
    """Convert ``X`` to a float32 tensor on ``device``; existing tensors are
    copied (clone + detach) so the caller's data is never aliased."""
    if isinstance(X, torch.Tensor):
        t = X.clone().detach()
    else:
        t = torch.tensor(X)
    return t.to(device=torch.device(device), dtype=torch.float32)
infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/composite_compliance.py ADDED
@@ -0,0 +1,581 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: ignore-errors
2
+
3
+ import torch
4
+ from torch import Tensor
5
+ import itertools
6
+
7
+ from torch.utils._python_dispatch import TorchDispatchMode
8
+ from torch.utils._pytree import tree_map, tree_flatten, tree_unflatten
9
+ from torch.utils import _pytree as pytree
10
+ from functools import partial
11
+ from torch.utils._mode_utils import no_dispatch, all_same_mode
12
+ import torch.autograd.forward_ad as fwAD
13
+ from typing import Callable
14
+ import re
15
+
16
+
17
def check_attr_consistency(wrapper_tensor, metadata_name, metadata_accessor):
    """Raise if ``metadata_accessor`` disagrees between the wrapper tensor and
    its wrapped ``.elem`` — i.e. metadata was mutated without dispatching."""
    inner = wrapper_tensor.elem
    if metadata_accessor(wrapper_tensor) == metadata_accessor(inner):
        return
    raise RuntimeError(
        f"This operator is not Composite Compliant: the "
        f"{metadata_name} of the tensor was modified directly without "
        f"going through the PyTorch dispatcher.")
27
+
28
def check_metadata_consistency(wrapper_tensor, CCT):
    """Check every piece of tensor metadata on a CCT wrapper against its
    wrapped ``.elem``; non-CCT inputs are ignored."""
    # CCT: CompositeCompliantTensor class which is generated using generate_cct
    if not isinstance(wrapper_tensor, CCT):
        return
    accessors = (
        ('shape', Tensor.size),
        ('dtype', lambda x: x.dtype),
        ('device', lambda x: x.device),
        ('numel', Tensor.numel),
        ('stride', Tensor.stride),
        ('storage_offset', Tensor.storage_offset),
    )
    for metadata_name, metadata_accessor in accessors:
        check_attr_consistency(wrapper_tensor, metadata_name, metadata_accessor)
42
+
43
def is_view_fn(func):
    """Return True if ``func``'s overload packet names a (non-inplace) view op."""
    view_op_names = {
        'as_strided', 'detach', 'diagonal', 'expand', 'expand_as',
        'movedim', 'narrow', 'permute', 'select', 'squeeze',
        'transpose', 't', 'real', 'imag', 'view_as_real',
        'view_as_complex', 'unflatten', 'unfold', 'unsqueeze',
        'view', 'view_as', 'unbind', 'split', 'split_with_sizes',
        'vsplit', 'hsplit', 'tensor_split', 'chunk', 'swapaxes',
        'slice', '_reshape_alias', '_unsafe_view', '_conj', 'alias',
    }
    return func.overloadpacket.__name__ in view_op_names
80
+
81
# manually populated from native_functions that have inplace_view: True.
# In the future we will probably be able to grab that list directly
def is_inplace_view_fn(func):
    """Return True if ``func``'s overload packet names an in-place view op."""
    inplace_view_op_names = {
        'as_strided_', 'detach_', 'squeeze_', 'swapaxes_',
        'swapdims_', 't_', 'transpose_', 'unsqueeze_',
    }
    return func.overloadpacket.__name__ in inplace_view_op_names
94
+
95
+
96
# Introspection please save us
def is_inplace(func):
    """Heuristic: does ``func`` mutate its first argument in place?

    Dunder in-place ops (``__iadd__``-style) count; other dunders do not;
    otherwise the aten trailing-underscore naming convention decides.
    """
    name = func.overloadpacket.__name__
    if re.match('__i.+__', name):
        return True
    if re.match('__.+__', name):
        return False
    return name.endswith('_')
104
+
105
+
106
+ def generate_cct_and_mode(autograd_view_consistency=True):
107
+ # This function returns a new class CompositeCompliantTensor
108
+ # The two arguments control the behaviour described below.
109
+
110
+ # autograd_view_consistency:
111
+ # If True, alias result using `set_` if func returns a view
112
+ # (See Note [Alias Result]).
113
+ # Since Forward AD doesn't work with `set_`
114
+ # we disable it by setting alias to False.
115
+
116
+ class CompositeCompliantTensor(torch.Tensor):
117
+ elem: torch.Tensor
118
+
119
+ __slots__ = ['elem']
120
+
121
+ @staticmethod
122
+ def __new__(cls, elem, mode, *args, **kwargs):
123
+ assert type(elem) is not cls, \
124
+ "Wrapping a CompositeCompliantTensor in a CompositeCompliantTensor is not supported"
125
+
126
+ # The storage of CompositeCompliantTensor should never be used directly
127
+ # by a Composite operation; if the Composite
128
+ # operator attempts to read from the storage without dispatching then it'll
129
+ # raise a RuntimeError due to it being a meta storage.
130
+ r = torch.Tensor._make_wrapper_subclass( # type: ignore[attr-defined]
131
+ cls, elem.size(),
132
+ dtype=elem.dtype, layout=elem.layout,
133
+ device=elem.device, requires_grad=elem.requires_grad,
134
+ strides=elem.stride(), storage_offset=elem.storage_offset())
135
+
136
+ if elem.requires_grad:
137
+ # CompositeCompliantTensor steals the "requires_grad"-ness.
138
+ # Why a new copy of `elem`? Because sometimes OpInfo shares inputs between tests...
139
+ tmp = torch.empty_strided(elem.shape, elem.stride(), dtype=elem.dtype,
140
+ device=elem.device, layout=elem.layout,
141
+ requires_grad=False)
142
+ tmp.copy_(elem.detach())
143
+ r.elem = tmp
144
+ else:
145
+ r.elem = elem
146
+
147
+ assert r.stride() == r.elem.stride()
148
+
149
+ # Propagate conjugate bits to the wrapper tensor
150
+ # Ref: https://github.com/albanD/subclass_zoo/issues/24
151
+ # Ref: https://github.com/albanD/subclass_zoo/issues/21
152
+ torch._C._set_conj(r, r.elem.is_conj())
153
+ torch._C._set_neg(r, r.elem.is_neg())
154
+
155
+ r.mode = mode
156
+ return r
157
+
158
+ def __repr__(self):
159
+ return f"CompositeCompliantTensor({self.elem})"
160
+
161
+ @classmethod
162
+ def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
163
+ all_args = pytree.arg_tree_leaves(*args, **(kwargs or {}))
164
+ modes = tuple(e.mode for e in all_args if isinstance(e, CompositeCompliantTensor))
165
+ if not all_same_mode(modes):
166
+ raise RuntimeError("Multiple CompositeCompliantTensorModes NYI")
167
+ with modes[0]:
168
+ return func(*args, **kwargs)
169
+
170
class CompositeCompliantTensorMode(TorchDispatchMode):
    """Dispatch mode that runs each op on the unwrapped tensors, re-wraps the
    results, and raises if the op did anything non-composite-compliant
    (.item() calls, set_/resize_, in-place writes into a plain Tensor, or
    metadata divergence between wrapper and wrapped tensor)."""

    def __torch_dispatch__(self, func, types, args=(), kwargs=None):
        def unwrap(e):
            # Peel the wrapper off CompositeCompliantTensors; pass others through.
            return e.elem if isinstance(e, CompositeCompliantTensor) else e

        def wrap(e):
            # Re-wrap plain Tensor results so downstream ops stay checked.
            return CompositeCompliantTensor(e, self) if isinstance(e, torch.Tensor) else e

        if func == torch.ops.aten._local_scalar_dense.default:
            raise RuntimeError(
                ".item() is not allowed to be called inside of composite "
                "functions in the PyTorch library because not all backends "
                "and/or Tensor subclasses (e.g. vmap, ProxyTensor) support them.")

        if func.overloadpacket.__name__ in ('set_', 'resize_'):
            raise RuntimeError(
                f"{func.__name__} is not allowed to be called inside of "
                f"Composite operators.")

        if is_inplace(func):
            # NB: We are making an assumption that if the function is in-place,
            # then the first argument is being written to. Introspection please save us!
            mutated_argument = args[0]
            if not isinstance(mutated_argument, CompositeCompliantTensor) and \
                    any(isinstance(a, CompositeCompliantTensor) for a in args[1:]):
                raise RuntimeError(
                    'Not composite compliant: performing in-place operation '
                    f'{func.__name__} where the Tensor being written to is '
                    'regular Tensor but the other tensors are Tensor Subclasses. '
                    'Please try to avoid this in-place operation.')

        unwrapped_args = tree_map(unwrap, args)
        unwrapped_kwargs = tree_map(unwrap, kwargs)
        unwrapped_rs = func(*unwrapped_args, **unwrapped_kwargs)
        rs = tree_map(wrap, unwrapped_rs)

        if is_view_fn(func) and autograd_view_consistency:
            # Note [Alias Result]
            # Autograd asserts that for B = A.view_fn(...), B and A's storages
            # are the same. Here we try to make B alias A to avoid those asserts.
            # See https://github.com/pytorch/pytorch/issues/65339 for more information
            # about the issue.
            with no_dispatch():
                # Idea: this is a weird way of getting a storage that aliases the input.
                # This is a workaround for #65339.
                # 1. under no_dispatch, all of the wrapper tensors look like regular
                #    tensors with special storage (the storage is nullptr and
                #    advertises CPU/CUDA device.
                # 2. we run func, which ends up running the view operation
                # 3. All view operations reuse the input's storage and return
                #    result Tensor(s) with new sizes/strides/offset that alias
                #    the input.
                # 4. we set the storage (and sizes/strides/offset) of the wrapper
                #    tensor results to be that of the tensors that alias the input
                result = func(*args, **kwargs)
                if isinstance(result, (tuple, list)):
                    for a, b in zip(rs, result):
                        a.set_(b)
                else:
                    rs.set_(result)

        # Some operations are allowed to in-place modify the metadata of the
        # inputs. The only ones are the "inplace view functions"; when we
        # run into these, we manually modify the metadata of the input.
        with no_dispatch():
            if is_inplace_view_fn(func):
                func(*args, **kwargs)

        # For each CompositeCompliantTensor t, we check that t and t.elem
        # have consistent metadata. If they don't have consistent metadata,
        # that means the operator did something fishy.
        check = partial(check_metadata_consistency, CCT=CompositeCompliantTensor)
        pytree.tree_map_(check, args)
        pytree.tree_map_(check, kwargs)
        pytree.tree_map_(check, rs)
        return rs
246
+
247
+ return CompositeCompliantTensor, CompositeCompliantTensorMode()
248
+
249
def is_tensorlist(lst):
    """Return True iff `lst` is a non-empty list/tuple consisting only of Tensors.

    Raises:
        RuntimeError: if `lst` mixes Tensors with non-Tensors, since these
            tests assume PyTorch APIs never accept such mixed lists.
    """
    if not isinstance(lst, (list, tuple)):
        return False
    if len(lst) == 0:
        return False
    all_tensors = all(isinstance(elt, torch.Tensor) for elt in lst)
    if all_tensors:
        return True
    # Bug fix: this previously used all(), which can never be True on this
    # path (all_tensors was just checked), so the mixed-list guard below was
    # unreachable. any() restores the intended check.
    exists_one_tensor = any(isinstance(elt, torch.Tensor) for elt in lst)
    if exists_one_tensor:
        raise RuntimeError('This test assumes that PyTorch APIs cannot take '
                           'mixed lists of Tensor and other things')
    return False
262
+
263
+
264
def maybe_map(fn, should_map, arg):
    """Apply `fn` to `arg` when `should_map` is truthy; otherwise return `arg` unchanged."""
    if should_map:
        return fn(arg)
    return arg
266
+
267
+
268
def wrap(arg, CCT, cct_mode):
    """Wrap a Tensor, or a list/tuple of Tensors, in the CCT subclass.

    CCT: CompositeCompliantTensor class generated via generate_cct_and_mode.
    """
    if is_tensorlist(arg):
        return [CCT(elem, cct_mode) for elem in arg]
    if isinstance(arg, torch.Tensor):
        return CCT(arg, cct_mode)
    raise RuntimeError("wrap assumes that the input can be wrapped")
275
+
276
+
277
# Given a list of flat arguments, some of which may be Tensors, return all
# possible ways some of the arguments could be CompositeCompliantTensors (CCT).
# For example, given Tensors A, B, C and flat_args = [A, 1, B],
# We would return the following 4 options:
# [CCT(A), 1, CCT(B)]
# [CCT(A), 1, B]
# [A, 1, CCT(B)]
# [A, 1, B]
# NB: Yes, this is exponential. No, we don't care too much because PyTorch ops
# don't accept that many input Tensors.
def generate_subclass_choices(flat_args, CCT, cct_mode):
    # CCT: CompositeCompliantTensor class which is generated using generate_cct_and_mode
    # Non-tensor positions only get the [False] option, so they are never wrapped.
    is_tensor_likes = [isinstance(arg, torch.Tensor) or is_tensorlist(arg) for arg in flat_args]
    subclass_options = [[False, True] if is_tensor_like else [False] for is_tensor_like in is_tensor_likes]

    # Each product element is a bool mask: wrap the positions marked True.
    for which_args_are_wrapped in itertools.product(*subclass_options):

        result = [maybe_map(partial(wrap, CCT=CCT, cct_mode=cct_mode), should_wrap_arg, arg)
                  for should_wrap_arg, arg in zip(which_args_are_wrapped, flat_args)]
        yield result, which_args_are_wrapped
297
+
298
+
299
def generate_subclass_choices_args_kwargs(args, kwargs, CCT, cct_mode):
    """For an operation f(*args, **kwargs), yield every combination where each
    Tensor argument is either a regular Tensor or wrapped in CCT, along with
    debug metadata describing which positions were wrapped.

    CCT: CompositeCompliantTensor class generated via generate_cct_and_mode.
    """
    flat_kwargs, spec = tree_flatten(kwargs)
    num_args = len(args)
    combined = list(args) + list(flat_kwargs)
    for wrapped, wrapped_mask in generate_subclass_choices(combined, CCT, cct_mode):
        yield (
            wrapped[:num_args],
            tree_unflatten(wrapped[num_args:], spec),
            wrapped_mask[:num_args],
            tree_unflatten(wrapped_mask[num_args:], spec),
        )
312
+
313
+
314
def raise_composite_compliance_error(err, additional_info=''):
    """Chain `err` into a RuntimeError that explains the compliance failure
    and how to resolve or skip it."""
    message = (
        "Composite compliance check failed with "
        "the above error.\n"
        f"{additional_info}"
        "If you are adding an OpInfo of an "
        "existing operator, please feel free to skip this test "
        "because the problem was pre-existing and file an issue. "
        "Otherwise, if you added a new operator, please read "
        "through the Composite Compliance section in "
        "aten/src/ATen/native/README.md for how to resolve this. "
    )
    raise RuntimeError(message) from err
326
+
327
+
328
# This test checks ALL possible permutations of calling `op` with arguments
# that are individually either a regular Tensor or a Tensor subclass.
#
# The general strategy is to wrap some Tensor args and kwargs in
# CompositeCompliantTensor wrappers and call the operation.

# If some composite operation does any non-compliant behavior,
# CompositeCompliantTensor will raise an error.
def check_all_permutations(op, args, kwargs, assert_equal_fn):
    CCT, cct_mode = generate_cct_and_mode()
    # Reference output computed with plain Tensors.
    expected = op(*args, **kwargs)
    for choice in generate_subclass_choices_args_kwargs(args, kwargs, CCT, cct_mode):
        new_args, new_kwargs, which_args_are_wrapped, which_kwargs_are_wrapped = choice

        try:
            actual = op(*new_args, **new_kwargs)
        # NOTE: [What errors are Composite Compliance trying to catch?]
        #
        # There's two things we want to catch:
        # - errors that would raise within the torch_dispatch impl
        # - data_ptr accesses
        # The first is easy to filter for (we could make the error a different
        # error class), the second is always going to be a RuntimeError due to
        # how it is implemented (if you try to access the data_ptr of the
        # wrapper Tensor, it raises you some internal RuntimeError).
        #
        # So the most general thing to catch here was RuntimeError. If you
        # are here and debugging why your test failed, it's plausible that
        # the operator itself is broken and that there are other tests failing.
        except RuntimeError as err:
            # Always raises, attaching which positions were wrapped for debugging.
            raise_composite_compliance_error(
                err,
                f"- wrapped_args: {which_args_are_wrapped}\n"
                f"- wrapped_kwargs: {which_kwargs_are_wrapped}\n"
            )

        def unwrap(e):
            return e.elem if isinstance(e, CCT) else e

        # Every wrapped permutation must match the plain-Tensor reference.
        assert_equal_fn(tree_map(unwrap, actual), expected)
368
+
369
# Checks via the usage of torch dispatch mode certain anti-patterns that
# are not composite compliant.
#
# In particular, the anti-pattern we are trying to prevent is a user
# creating an empty tensor and then resize_-ing it. Torch Dispatch Mode helps
# here because all factory functions will create tensors that are
# CompositeCompliantTensor.
#
# The general strategy is to wrap all Tensor args and kwargs in
# CompositeCompliantTensor wrappers. If an operator that is
# Composite does any non-compliant behavior,
# CompositeCompliantTensor will raise an error.
def check_with_mode(op, args, kwargs, assert_equal_fn):
    CCT, cct_mode = generate_cct_and_mode()

    def wrap(e):
        return CCT(e, cct_mode) if isinstance(e, torch.Tensor) else e

    # Reference output computed before wrapping, with plain Tensors.
    expected = op(*args, **kwargs)

    args = tree_map(wrap, args)
    kwargs = tree_map(wrap, kwargs)
    try:
        # Running under the mode makes factory functions produce CCTs too.
        with cct_mode:
            actual = op(*args, **kwargs)
    # see NOTE: [What errors are Composite Compliance trying to catch?]
    except RuntimeError as err:
        raise_composite_compliance_error(err)

    def unwrap(e):
        return e.elem if isinstance(e, CCT) else e

    assert_equal_fn(tree_map(unwrap, actual), expected)
402
+
403
def gather_leaf_tensors(args, kwargs):
    """Collect every Tensor in `args`/`kwargs` that has requires_grad set,
    in flattened args-then-kwargs order."""
    flat_args, _ = tree_flatten(args)
    flat_kwargs, _ = tree_flatten(kwargs)
    return [t for t in flat_args + flat_kwargs
            if isinstance(t, torch.Tensor) and t.requires_grad]
414
+
415
+
416
def compute_expected_grads(op, args, kwargs, output_process_fn_grad=None, gradcheck_wrapper=None):
    """Run `op` on plain Tensors and return gradients w.r.t. all leaf inputs.

    Serves as the reference result that CCT-wrapped runs are compared
    against in check_backward_formula.
    """
    if gradcheck_wrapper is None:
        results = op(*args, **kwargs)
    else:
        results = gradcheck_wrapper(op, *args, **kwargs)

    if output_process_fn_grad is not None:
        results = output_process_fn_grad(results)

    # Keep only differentiable Tensor outputs.
    flat_results = pytree.tree_leaves(results)
    flat_results = [r for r in flat_results if isinstance(r, torch.Tensor)]
    flat_diff_results = [r for r in flat_results if r.requires_grad]
    assert len(flat_diff_results) > 0

    # All-ones cotangents (regular Tensors, one per differentiable output).
    grads = [torch.ones(r.shape, device=r.device, dtype=r.dtype) for r in flat_diff_results]
    leaf_tensors = gather_leaf_tensors(args, kwargs)
    assert len(leaf_tensors) > 0
    return torch.autograd.grad(flat_diff_results, leaf_tensors,
                               grads, allow_unused=True, retain_graph=True)
435
+
436
+
437
# Checks if the backward formula is composite compliant by testing
# all possible permutations of {inputs, grad_outputs} being
# CompositeCompliantTensor or regular Tensors.
#
# NB: it is important that op is accepted as a Callable and not an OpInfo,
# this means we can apply check_backward_formula to things that aren't OpInfos
# while debugging.
def check_backward_formula(op: Callable, args, kwargs,
                           output_process_fn_grad=None,
                           gradcheck_wrapper=None, assert_equal_fn=None):
    CCT, cct_mode = generate_cct_and_mode()

    # Reference gradients computed with plain Tensors.
    expected = compute_expected_grads(op, args, kwargs, output_process_fn_grad, gradcheck_wrapper)

    for choice in generate_subclass_choices_args_kwargs(args, kwargs, CCT, cct_mode):
        new_args, new_kwargs, which_args_are_wrapped, which_kwargs_are_wrapped = choice
        leaf_tensors = gather_leaf_tensors(new_args, new_kwargs)
        assert len(leaf_tensors) > 0

        try:
            if gradcheck_wrapper is None:
                results = op(*new_args, **new_kwargs)
            else:
                results = gradcheck_wrapper(op, *new_args, **new_kwargs)
            if output_process_fn_grad is not None:
                results = output_process_fn_grad(results)
        # see NOTE: [What errors are Composite Compliance trying to catch?]
        except RuntimeError as err:
            raise_composite_compliance_error(
                err,
                f"- wrapped_args: {which_args_are_wrapped}\n"
                f"- wrapped_kwargs: {which_kwargs_are_wrapped}\n"
            )

        # Keep only differentiable Tensor outputs.
        flat_results = pytree.tree_leaves(results)
        flat_results = [r for r in flat_results if isinstance(r, torch.Tensor)]
        flat_diff_results = [r for r in flat_results if r.requires_grad]
        assert len(flat_diff_results) > 0

        # NB: ones, not ones_like, so we get a regular Tensor here
        grads = [torch.ones(r.shape, device=r.device, dtype=r.dtype)
                 for r in flat_diff_results]
        # Additionally permute which grad_outputs get wrapped in CCT.
        for flat_new_grads, which_grad_is_batched in generate_subclass_choices(grads, CCT, cct_mode):
            try:
                actual = torch.autograd.grad(flat_diff_results, leaf_tensors, flat_new_grads,
                                             allow_unused=True, retain_graph=True)
            # see NOTE: [What errors are Composite Compliance trying to catch?]
            except RuntimeError as err:
                raise_composite_compliance_error(
                    err,
                    f"- wrapped_args: {which_args_are_wrapped}\n"
                    f"- wrapped_kwargs: {which_kwargs_are_wrapped}\n"
                    f"- wrapped_grads: {which_grad_is_batched}\n"
                )

            def unwrap(e):
                return e.elem if isinstance(e, CCT) else e

            assert_equal_fn(tuple(map(unwrap, actual)), expected, equal_nan=True)
496
+
497
# Checks if the forward AD formula is composite compliant by testing
# all possible permutations of {primals, tangents} being
# CompositeCompliantTensor or regular Tensors.
#
# NB: it is important that op is accepted as a Callable and not an OpInfo,
# this means we can apply check_forward_ad_formula to things that aren't OpInfos
# while debugging.
def check_forward_ad_formula(op: Callable, args, kwargs, gradcheck_wrapper=None, assert_equal_fn=None):
    # NOTE(review): the view-alias fixup (set_) is disabled here — presumably
    # incompatible with forward-mode AD; confirm against generate_cct_and_mode.
    CCT, cct_mode = generate_cct_and_mode(autograd_view_consistency=False)

    def maybe_tangent(t):
        assert type(t) is not CCT
        # Generate `tangent` tensor
        # if given object is a Tensor and requires grad is set.
        if isinstance(t, torch.Tensor) and t.requires_grad:
            return torch.randn_like(t)
        elif is_tensorlist(t):
            return [torch.randn_like(e) if e.requires_grad else None for e in t]
        return None

    tangent_args = tuple(maybe_tangent(arg) for arg in args)
    flat_kwargs, spec = tree_flatten(kwargs)
    flat_tangent_kwargs = tuple(maybe_tangent(arg) for arg in flat_kwargs)
    tangent_kwargs = tree_unflatten(flat_tangent_kwargs, spec)

    with fwAD.dual_level():
        def maybe_make_dual(dual):
            # Returns dual tensor if primal is a tensor/tensor subclass
            # with requires_grad set.
            primal, tangent = dual
            if isinstance(primal, torch.Tensor) and primal.requires_grad:
                return fwAD.make_dual(primal.detach(), tangent)
            elif is_tensorlist(primal):
                return tuple(fwAD.make_dual(pri.detach(), tang) if tang is not None else pri
                             for pri, tang in zip(primal, tangent))
            return primal

        def compute_expected_grad(args, tangent_args, kwargs, tangent_kwargs):
            op_args = tuple(map(maybe_make_dual, zip(args, tangent_args)))
            op_kwargs = {k: maybe_make_dual((v, tangent_kwargs[k])) for k, v in kwargs.items()}

            if gradcheck_wrapper is None:
                return op(*op_args, **op_kwargs)
            return gradcheck_wrapper(op, *op_args, **op_kwargs)

        # Reference primals/tangents computed with plain Tensors.
        expected = compute_expected_grad(args, tangent_args, kwargs, tangent_kwargs)
        expected = tree_map(fwAD.unpack_dual, expected)
        expected_primals = tree_map(lambda x: x.primal, expected)
        expected_tangents = tree_map(lambda x: x.tangent, expected)

        # Permutations of arg and kwargs in CCT.
        for choice in generate_subclass_choices_args_kwargs(args, kwargs, CCT, cct_mode):
            new_args, new_kwargs, which_args_are_wrapped, which_kwargs_are_wrapped = choice

            # Permutations tangent arg and tangent kwargs in CCT.
            for tang_choice in generate_subclass_choices_args_kwargs(tangent_args, tangent_kwargs, CCT, cct_mode):
                new_tang_args, new_tang_kwargs, \
                    which_tang_args_are_wrapped, which_tang_kwargs_are_wrapped = tang_choice

                op_args = tuple(map(maybe_make_dual, zip(new_args, new_tang_args)))
                op_kwargs = {k: maybe_make_dual((v, new_tang_kwargs[k])) for k, v in new_kwargs.items()}

                try:
                    if gradcheck_wrapper is None:
                        actual = op(*op_args, **op_kwargs)
                    else:
                        actual = gradcheck_wrapper(op, *op_args, **op_kwargs)
                # see NOTE: [What errors are Composite Compliance trying to catch?]
                except RuntimeError as err:
                    raise_composite_compliance_error(
                        err,
                        f"- wrapped_args: {which_args_are_wrapped}\n"
                        f"- wrapped_kwargs: {which_kwargs_are_wrapped}\n"
                        f"- wrapped_tangent_args: {which_tang_args_are_wrapped}\n"
                        f"- wrapped_tangent_kwargs: {which_tang_kwargs_are_wrapped}\n"
                    )

                def unwrap(e):
                    return e.elem if isinstance(e, CCT) else e

                # Compare both primal and tangent outputs to the reference.
                actual = tree_map(fwAD.unpack_dual, actual)
                actual_primals = tree_map(lambda x: unwrap(x.primal), actual)
                actual_tangents = tree_map(lambda x: unwrap(x.tangent), actual)
                assert_equal_fn(actual_primals, expected_primals, equal_nan=True)
                assert_equal_fn(actual_tangents, expected_tangents, equal_nan=True)
infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/custom_op_db.py ADDED
@@ -0,0 +1,586 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-decorators
2
+ # mypy: allow-untyped-defs
3
+ import torch
4
+ import functools
5
+ from torch.testing import make_tensor
6
+ from torch.testing._internal.opinfo.core import (
7
+ OpInfo,
8
+ SampleInput,
9
+ )
10
+ from torch.testing._internal.common_dtype import all_types_and
11
+ import numpy as np
12
+ from torch.testing._internal.autograd_function_db import (
13
+ sample_inputs_numpy_cube,
14
+ sample_inputs_numpy_mul,
15
+ sample_inputs_numpy_mul_scalar,
16
+ sample_inputs_numpy_sort,
17
+ sample_inputs_numpy_take,
18
+ )
19
+ from torch import Tensor
20
+ from torch.types import Number
21
+ from typing import * # noqa: F403
22
+
23
+ # Note: [custom op db]
24
+ #
25
+ # This is a collection of custom operator test cases written as OpInfos
26
+ # so they can easily be consumed by OpInfo-based tests to check if subsystems
27
+ # support them correctly.
28
+
29
def to_numpy(tensor):
    """Move `tensor` to host memory and return it as a numpy array."""
    host_tensor = tensor.cpu()
    return host_tensor.numpy()
31
+
32
@torch.library.custom_op("_torch_testing::numpy_cube", mutates_args=())
def numpy_cube(x: Tensor) -> Tuple[Tensor, Tensor]:
    """Return (x**3, 3*x**2), computed via numpy on the host."""
    x_np = to_numpy(x)
    dx = torch.tensor(3 * x_np ** 2, device=x.device)
    return torch.tensor(x_np ** 3, device=x.device), dx

@numpy_cube.register_fake
def _(x):
    # Fake (meta) kernel: both outputs share x's shape/dtype/device.
    return x.clone(), x.clone()

def numpy_cube_setup_context(ctx, inputs, output):
    x, = inputs
    cube, dx = output
    ctx.save_for_backward(x, dx)

def numpy_cube_backward(ctx, grad_out, grad_dx):
    # d(x**3)/dx = 3x**2 (== dx saved in forward); d(3x**2)/dx = 6x.
    x, dx = ctx.saved_tensors
    grad_x = numpy_mul(grad_out, dx) + 6 * numpy_mul(grad_dx, x)
    return grad_x

numpy_cube.register_autograd(numpy_cube_backward, setup_context=numpy_cube_setup_context)

def numpy_cube_vmap(info, in_dims, x):
    # Elementwise op: the batch dim passes straight through to both outputs.
    result = numpy_cube(x)
    return result, (in_dims[0], in_dims[0])

numpy_cube.register_vmap(numpy_cube_vmap)
59
+
60
@torch.library.custom_op("_torch_testing::numpy_mul", mutates_args=())
def numpy_mul(x: Tensor, y: Tensor) -> Tensor:
    """Elementwise multiply, computed via numpy on the host."""
    return torch.tensor(to_numpy(x) * to_numpy(y), device=x.device)

@numpy_mul.register_fake
def _(x, y):
    assert x.device == y.device
    return (x * y).contiguous()

def numpy_mul_setup_context(ctx, inputs, output):
    ctx.save_for_backward(*inputs)

def numpy_mul_backward(ctx, grad_out):
    # d(x*y)/dx = y, d(x*y)/dy = x; skip grads that are not needed.
    x, y = ctx.saved_tensors
    grad_x = grad_out * y if ctx.needs_input_grad[0] else None
    grad_y = grad_out * x if ctx.needs_input_grad[1] else None
    return grad_x, grad_y

numpy_mul.register_autograd(numpy_mul_backward, setup_context=numpy_mul_setup_context)

def numpy_mul_vmap(info, in_dims, x, y):
    # Move each batch dim to the trailing position (adding a size-1 dim for
    # unbatched inputs so broadcasting works), multiply, then put the batch
    # dim at the front.
    x_bdim, y_bdim = in_dims
    x = x.movedim(x_bdim, -1) if x_bdim is not None else x.unsqueeze(-1)
    y = y.movedim(y_bdim, -1) if y_bdim is not None else y.unsqueeze(-1)
    result = x * y
    result = result.movedim(-1, 0)
    return result, 0

numpy_mul.register_vmap(numpy_mul_vmap)
89
+
90
@torch.library.custom_op("_torch_testing::numpy_mul_scalar", mutates_args=())
def numpy_mul_scalar(x: Tensor, *, scalar: float) -> Tensor:
    """Multiply x by a keyword-only Python scalar, computed via numpy."""
    return torch.tensor(to_numpy(x) * scalar, device=x.device)

@numpy_mul_scalar.register_fake
def _(x, *, scalar):
    return (x * scalar).contiguous()

def numpy_mul_scalar_setup_context(ctx, inputs, keyword_only_inputs, output):
    # Keyword-only inputs arrive in a separate dict.
    ctx.scalar = keyword_only_inputs["scalar"]

def numpy_mul_scalar_backward(ctx, grad_out):
    # d(x*c)/dx = c; no gradient is returned for the scalar.
    grad_x = grad_out * ctx.scalar
    return grad_x

numpy_mul_scalar.register_autograd(numpy_mul_scalar_backward, setup_context=numpy_mul_scalar_setup_context)

def numpy_mul_scalar_vmap(info, in_dims, x, *, scalar):
    x_bdim, = in_dims
    x = x.movedim(x_bdim, -1) if x_bdim is not None else x.unsqueeze(-1)
    result = x * scalar
    result = result.movedim(-1, 0)
    return result, 0

numpy_mul_scalar.register_vmap(numpy_mul_scalar_vmap)
115
+
116
@torch.library.custom_op("_torch_testing::numpy_sort", mutates_args=())
def numpy_sort(x: Tensor, dim: int) -> Tuple[Tensor, Tensor, Tensor]:
    """Sort x along `dim`; also return the sort indices and their inverse."""
    device = x.device
    x = to_numpy(x)
    ind = np.argsort(x, axis=dim)
    ind_inv = np.argsort(ind, axis=dim)
    result = np.take_along_axis(x, ind, axis=dim)
    return (
        torch.tensor(result, device=device),
        torch.tensor(ind, device=device),
        torch.tensor(ind_inv, device=device),
    )

@numpy_sort.register_fake
def _(x, dim):
    return torch.empty_like(x), torch.empty_like(x, dtype=torch.long), torch.empty_like(x, dtype=torch.long)

def numpy_sort_setup_context(ctx, inputs, output):
    out, ind, ind_inv = output
    ctx.dim = inputs[1]
    ctx.save_for_backward(ind, ind_inv)
    # Index outputs carry no gradient.
    ctx.mark_non_differentiable(ind, ind_inv)

def numpy_sort_backward(ctx, grad_out, grad_ind, grad_ind_inv):
    # Route the gradient back to pre-sort positions via the inverse permutation.
    ind, ind_inv = ctx.saved_tensors
    return numpy_take(grad_out, ind_inv, ind, ctx.dim), None

numpy_sort.register_autograd(numpy_sort_backward, setup_context=numpy_sort_setup_context)

def numpy_sort_vmap(info, in_dims, x, dim):
    x_bdim, _ = in_dims
    x = x.movedim(x_bdim, 0)
    # Wrap negative `dim` against the logical (unbatched) rank, then shift by
    # one for the batch dim now at position 0.
    dim = dim if dim >= 0 else dim + x.dim() - 1
    result = numpy_sort(x, dim + 1)
    return result, (0, 0, 0)

numpy_sort.register_vmap(numpy_sort_vmap)
153
+
154
@torch.library.custom_op("_torch_testing::numpy_take", mutates_args=())
def numpy_take(x: Tensor, ind: Tensor, ind_inv: Tensor, dim: int) -> Tensor:
    """Gather x along `dim` with np.take_along_axis.

    `ind_inv` is unused in the forward pass; it is threaded through so the
    backward pass can gather with the two permutations swapped.
    """
    device = x.device
    x = to_numpy(x)
    ind = to_numpy(ind)
    return torch.tensor(np.take_along_axis(x, ind, dim), device=device)

@numpy_take.register_fake
def _(x, ind, ind_inv, dim):
    assert x.device == ind.device
    assert x.device == ind_inv.device
    assert ind.dtype == torch.long
    assert ind_inv.dtype == torch.long
    return torch.empty_like(x)

def numpy_take_setup_context(ctx, inputs, output):
    x, ind, ind_inv, dim = inputs
    ctx.dim = dim
    ctx.save_for_backward(ind, ind_inv)

def numpy_take_backward(ctx, grad_out):
    # Backward of a permutation-gather is the gather by the inverse permutation.
    ind, ind_inv = ctx.saved_tensors
    grad_x = numpy_take(grad_out, ind_inv, ind, ctx.dim)
    return grad_x, None, None, None

numpy_take.register_autograd(numpy_take_backward, setup_context=numpy_take_setup_context)

def numpy_take_vmap(info, in_dims, x, ind, ind_inv, dim):
    x_bdim, ind_bdim, ind_inv_bdim, _ = in_dims

    # wrap dim
    # NOTE(review): when x is batched this uses `x_bdim - 1` as the logical
    # rank, which only matches `x.dim() - 1` for trailing batch dims — confirm
    # this is intentional.
    logical_dim = x.dim() if x_bdim is None else x_bdim - 1
    dim = dim if dim >= 0 else dim + logical_dim

    def expand_bdim(x, x_bdim):
        # Materialize a batch dim at position 0, broadcasting unbatched inputs.
        if x_bdim is None:
            return x.expand(info.batch_size, *x.shape)
        return x.movedim(x_bdim, 0)

    x = expand_bdim(x, x_bdim)
    ind = expand_bdim(ind, ind_bdim)
    ind_inv = expand_bdim(ind_inv, ind_inv_bdim)

    return numpy_take(x, ind, ind_inv, dim + 1), 0

numpy_take.register_vmap(numpy_take_vmap)
200
+
201
@torch.library.custom_op("_torch_testing::numpy_nonzero", mutates_args=())
def numpy_nonzero(x: Tensor) -> Tensor:
    """Return coordinates of nonzero elements, one row per element.

    The output shape is data-dependent; inputs with fewer than two nonzero
    elements are rejected.
    """
    x_np = to_numpy(x)
    res = np.stack(np.nonzero(x_np), axis=1)
    if res.shape[0] <= 1:
        raise RuntimeError("not supported")
    return torch.tensor(res, device=x.device)

@numpy_nonzero.register_fake
def _(x):
    # The nonzero count is unknown at trace time, so the first output dim is
    # an unbacked symbolic int.
    ctx = torch._custom_op.impl.get_ctx()
    i0 = ctx.create_unbacked_symint()
    shape = [i0, x.dim()]
    result = x.new_empty(shape, dtype=torch.long)
    return result

def sample_inputs_numpy_nonzero(opinfo, device, dtype, requires_grad, **kwargs):
    make_arg = functools.partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    shape = 10
    result = make_arg(shape, low=0.9, high=2)
    # Zero out a random subset so the input mixes zeros and nonzeros.
    mask = make_tensor(shape, low=0, high=2, device=device, dtype=torch.long)
    with torch.no_grad():
        result *= mask

    yield SampleInput(result, args=())

def numpy_nonzero_vmap(info, in_dims, x):
    raise NotImplementedError("Operator is data-dependent and cannot be vmapped.")

numpy_nonzero.register_vmap(numpy_nonzero_vmap)
231
+
232
@torch.library.custom_op("_torch_testing::numpy_view_copy", mutates_args=())
def numpy_view_copy(x: Tensor, shape: Sequence[int]) -> Tensor:
    """Return a reshaped copy of x (out-of-place 'view'), via numpy."""
    return torch.tensor(np.copy(to_numpy(x).reshape(shape)), device=x.device)

@numpy_view_copy.register_fake
def _(x, shape) -> Tensor:
    return x.clone().view(shape).clone()

def numpy_view_copy_setup_context(ctx, inputs, output) -> None:
    ctx.x_shape = inputs[0].shape

def numpy_view_copy_backward(ctx, grad_out):
    # Reshape the gradient back to the input's original shape.
    return torch.ops._torch_testing.numpy_view_copy(grad_out, ctx.x_shape), None

numpy_view_copy.register_autograd(numpy_view_copy_backward, setup_context=numpy_view_copy_setup_context)

def numpy_view_copy_vmap(info, in_dims, x, shape):
    # Prepend the batch size to the target shape and reshape in one call.
    x_bdim, _ = in_dims
    x = x.movedim(x_bdim, 0)
    x_shape = x.shape[0]
    batch_shape = (x_shape, *shape)
    result = numpy_view_copy(x, batch_shape)
    return result, 0

numpy_view_copy.register_vmap(numpy_view_copy_vmap)

def sample_inputs_numpy_view_copy(opinfo, device, dtype, requires_grad, **kwargs):
    make_arg = functools.partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    result = make_arg(2, 3, 4, low=0.9, high=2)
    yield SampleInput(result, args=([2, 12],))
262
+
263
@torch.library.custom_op('_torch_testing::numpy_cat', mutates_args=())
def numpy_cat(xs: Sequence[Tensor], dim: int) -> Tensor:
    """Concatenate same-device/same-dtype tensors along `dim`, via numpy."""
    assert len(xs) > 0
    assert all(x.device == xs[0].device for x in xs)
    assert all(x.dtype == xs[0].dtype for x in xs)
    np_xs = [to_numpy(x) for x in xs]
    np_out = np.concatenate(np_xs, axis=dim)
    return torch.tensor(np_out, device=xs[0].device)

@numpy_cat.register_fake
def _(xs, dim):
    assert len(xs) > 0
    assert all(x.device == xs[0].device for x in xs)
    assert all(x.dtype == xs[0].dtype for x in xs)
    return torch.cat(xs, dim=dim)

def numpy_cat_setup_context(ctx, inputs, output):
    # Remember each input's extent along `dim` so backward can split.
    xs, dim = inputs
    ctx.dim_sizes = [x.shape[dim] for x in xs]
    ctx.dim = dim

def numpy_cat_backward(ctx, grad_out):
    dim_sizes = ctx.dim_sizes
    dim = ctx.dim

    # Split the gradient back into the original per-input sizes.
    splits = list(np.cumsum(dim_sizes)[:-1])
    grad_xs = torch.ops._torch_testing.numpy_split_copy(grad_out, splits, dim)
    return grad_xs, None

numpy_cat.register_autograd(numpy_cat_backward, setup_context=numpy_cat_setup_context)

def numpy_cat_vmap(info, in_dims, x, dim):
    # NOTE(review): this unpacks a single batch dim for the whole tensor list
    # and passes it through unchanged — presumably all list elements share one
    # bdim; confirm against vmap's handling of list-typed args.
    x_bdim, = in_dims
    result = numpy_cat(x, dim)
    return result, x_bdim

numpy_cat.register_vmap(numpy_cat_vmap)

def sample_inputs_numpy_cat(opinfo, device, dtype, requires_grad, **kwargs):
    make_arg = functools.partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    r0 = make_arg(2, 3, 4, low=0.9, high=2)
    r1 = make_arg(4, 3, 4, low=0.9, high=2)
    r2 = make_arg(5, 3, 4, low=0.9, high=2)
    yield SampleInput([r0, r1, r2], args=(0,))
307
+
308
@torch.library.custom_op('_torch_testing::numpy_split_copy', mutates_args=())
def numpy_split_copy(x: Tensor, splits: Sequence[int], dim: int) -> List[Tensor]:
    """Split x at the given indices along `dim`, returning copies (via numpy)."""
    x_np = to_numpy(x)
    arrs = np.split(x_np, splits, axis=dim)
    return [torch.tensor(arr, device=x.device, dtype=x.dtype) for arr in arrs]

@numpy_split_copy.register_fake
def _(x, splits, dim):
    return [xi.clone() for xi in torch.tensor_split(x, splits, dim)]

def numpy_split_copy_setup_context(ctx, inputs, output):
    _, _, dim = inputs
    ctx.dim = dim

def numpy_split_copy_backward(ctx, grad_out):
    # Backward of split is concatenation of the piecewise gradients.
    result = torch.ops._torch_testing.numpy_cat(grad_out, dim=ctx.dim)
    return result, None, None

numpy_split_copy.register_autograd(numpy_split_copy_backward, setup_context=numpy_split_copy_setup_context)

def numpy_split_copy_vmap(info, in_dims, x, splits, dim):
    # Move the batch dim to the front and shift the split dim past it.
    x_bdim, _ , _ = in_dims
    x = x.movedim(x_bdim, 0)
    result = numpy_split_copy(x, splits, dim + 1)
    return result, 0

numpy_split_copy.register_vmap(numpy_split_copy_vmap)

def sample_inputs_numpy_split_copy(opinfo, device, dtype, requires_grad, **kwargs):
    make_arg = functools.partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    x = make_arg(2, 9, low=0.9, high=2)
    yield SampleInput(x, args=([1, 3, 6], 1))
340
+
341
@torch.library.custom_op('_torch_testing::numpy_split_copy_with_int', mutates_args=())
def numpy_split_copy_with_int(x: Tensor, splits: Sequence[int], dim: int) -> Tuple[List[Tensor], int]:
    """Like numpy_split_copy, but additionally returns the number of split points."""
    pieces = np.split(to_numpy(x), splits, axis=dim)
    out = [torch.tensor(piece, device=x.device, dtype=x.dtype) for piece in pieces]
    return out, len(splits)


@numpy_split_copy_with_int.register_fake
def _(x, splits, dim):
    return [piece.clone() for piece in torch.tensor_split(x, splits, dim)], len(splits)


def numpy_split_copy_with_int_setup_context(ctx, inputs, output):
    # Only the split dimension is needed for the backward pass.
    *_ignored, dim = inputs
    ctx.dim = dim


def numpy_split_copy_with_int_backward(ctx, grad_out, _):
    # The int output gets no gradient; cat the tensor grads back together.
    return torch.ops._torch_testing.numpy_cat(grad_out, dim=ctx.dim), None, None


numpy_split_copy_with_int.register_autograd(
    numpy_split_copy_with_int_backward,
    setup_context=numpy_split_copy_with_int_setup_context)


def numpy_split_copy_with_int_vmap(info, in_dims, x, splits, dim):
    """Batch rule: batch dim up front; the int output is unbatched (None bdim)."""
    bdim, _, _ = in_dims
    batched = x.movedim(bdim, 0)
    pieces, num_splits = numpy_split_copy_with_int(batched, splits, dim + 1)
    return (pieces, num_splits), ([0] * len(pieces), None)


numpy_split_copy_with_int.register_vmap(numpy_split_copy_with_int_vmap)
369
+
370
@torch.library.custom_op("_torch_testing::numpy_nms", mutates_args=())
def numpy_nms(boxes: Tensor, scores: Tensor, iou_threshold: Number) -> Tensor:
    """Greedy non-maximum suppression; returns indices of the kept boxes."""
    # Adapted from Ross Girshick's fast-rcnn implementation at
    # https://github.com/rbgirshick/fast-rcnn/blob/master/lib/utils/nms.py
    assert boxes.device == scores.device
    device = boxes.device

    boxes = to_numpy(boxes)
    scores = to_numpy(scores)

    N = boxes.shape[0]
    assert boxes.shape == (N, 4)
    assert scores.shape == (N,)

    # Box corners: (x1, y1) top-left, (x2, y2) bottom-right.
    x1 = boxes[:, 0]
    y1 = boxes[:, 1]
    x2 = boxes[:, 2]
    y2 = boxes[:, 3]

    # The +1 follows the fast-rcnn convention of inclusive pixel coordinates.
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    # Visit boxes from highest score to lowest.
    order = scores.argsort()[::-1]

    keep = []
    while order.size > 0:
        # Keep the highest-scoring remaining box...
        i = order[0]
        keep.append(i)
        # ...and compute the intersection of every other remaining box with it.
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])

        w = np.maximum(0.0, xx2 - xx1 + 1)
        h = np.maximum(0.0, yy2 - yy1 + 1)
        inter = w * h
        # Intersection over union against the kept box.
        ovr = inter / (areas[i] + areas[order[1:]] - inter)

        # Drop boxes whose IoU with the kept box exceeds the threshold.
        inds = np.where(ovr <= iou_threshold)[0]
        order = order[inds + 1]

    result = torch.tensor(np.stack(keep), device=device)
    # Needed for data-dependent condition :(
    assert result.size(0) >= 2
    return result

@numpy_nms.register_fake
def _(boxes, scores, iou_threshold):
    # Fake kernel: the number of kept boxes is data-dependent, so model the
    # output length with an unbacked symbolic integer.
    assert boxes.device == scores.device
    N = boxes.shape[0]
    assert boxes.shape == (N, 4)
    assert scores.shape == (N,)

    ctx = torch._custom_op.impl.get_ctx()
    i0 = ctx.create_unbacked_symint()
    result = boxes.new_empty([i0], dtype=torch.int64)
    return result

def numpy_nms_vmap(info, in_dims, boxes, scores, iou_threshold):
    # The output length depends on the data, so no batching rule is possible.
    raise NotImplementedError("Operator is data-dependent and cannot be vmapped.")

numpy_nms.register_vmap(numpy_nms_vmap)

def sample_inputs_numpy_nms(opinfo, device, dtype, requires_grad, **kwargs):
    """Yield 64 random boxes (x1, y1, x2, y2) with scores and a random IoU threshold."""
    make_arg = functools.partial(make_tensor, device=device, dtype=dtype)
    N = 64
    xs = make_arg([N], low=0, high=28)
    dx = make_arg([N], low=0, high=4)
    ys = make_arg([N], low=0, high=28)
    dy = make_arg([N], low=0, high=4)
    # Build well-formed boxes: (x2, y2) is always >= (x1, y1).
    boxes = torch.stack([xs, ys, xs + dx, ys + dy], dim=1).requires_grad_(requires_grad)
    scores = make_arg([N], low=0, high=1, requires_grad=requires_grad)
    iou_threshold = make_arg([], low=0, high=1).item()

    yield SampleInput(boxes, args=(scores, iou_threshold))
443
+
444
# OpInfo entries for the custom ops defined above, consumed by OpInfo-driven
# tests. Data-dependent ops (nonzero, nms) disable autograd; ops that return
# tensor lists disable the batched-gradient checks.
custom_op_db = [
    OpInfo(
        'NumpyCubeCustomOp',
        op=numpy_cube._opoverload,
        sample_inputs_func=sample_inputs_numpy_cube,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
    ),
    OpInfo(
        'NumpyMulCustomOp',
        op=numpy_mul._opoverload,
        sample_inputs_func=sample_inputs_numpy_mul,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
    ),
    OpInfo(
        'NumpyMulScalarCustomOp',
        op=numpy_mul_scalar._opoverload,
        sample_inputs_func=sample_inputs_numpy_mul_scalar,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
    ),
    OpInfo(
        'NumpySortCustomOp',
        op=numpy_sort._opoverload,
        sample_inputs_func=sample_inputs_numpy_sort,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
    ),
    OpInfo(
        'NumpyTakeCustomOp',
        op=numpy_take._opoverload,
        sample_inputs_func=sample_inputs_numpy_take,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
    ),
    OpInfo(
        'NumpyNonzeroCustomOp',
        op=numpy_nonzero._opoverload,
        sample_inputs_func=sample_inputs_numpy_nonzero,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_autograd=False,
        supports_out=False,
    ),
    OpInfo(
        'NumpyNMSCustomOp',
        op=torch.ops._torch_testing.numpy_nms,
        sample_inputs_func=sample_inputs_numpy_nms,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_autograd=False,
        supports_out=False,
    ),
    OpInfo(
        'NumpyViewCopyCustomOp',
        op=torch.ops._torch_testing.numpy_view_copy,
        sample_inputs_func=sample_inputs_numpy_view_copy,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_autograd=True,
        supports_out=False,
    ),
    OpInfo(
        'NumpyCatCustomOp',
        op=torch.ops._torch_testing.numpy_cat,
        sample_inputs_func=sample_inputs_numpy_cat,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_autograd=True,
        check_batched_grad=False,
        check_batched_gradgrad=False,
        supports_out=False,
    ),
    OpInfo(
        'NumpySplitCopyCustomOp',
        op=torch.ops._torch_testing.numpy_split_copy,
        sample_inputs_func=sample_inputs_numpy_split_copy,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_autograd=True,
        check_batched_grad=False,
        check_batched_gradgrad=False,
        supports_out=False,
    ),
    OpInfo(
        'NumpySplitCopyWithIntCustomOp',
        op=torch.ops._torch_testing.numpy_split_copy_with_int,
        sample_inputs_func=sample_inputs_numpy_split_copy,
        dtypes=all_types_and(torch.bool, torch.half),
        # gradcheck only the tensor-list output; the int output has no grad.
        gradcheck_wrapper=lambda op, *args, **kwargs: op(*args, **kwargs)[0],
        supports_autograd=True,
        check_batched_grad=False,
        check_batched_gradgrad=False,
        supports_out=False,
    ),
]
536
+
537
+
538
# ==============================================================
# some mechanical test cases
# ==============================================================

lib = torch.library.Library("_torch_testing", "FRAGMENT")  # noqa: TOR901

# source0..source5 all define an identical clone op, but register their fake
# kernels through different API styles (decorator vs. direct call; low-level
# lib.define vs. torch.library.custom_op).
# NOTE(review): presumably these exercise how torch.library attributes a
# registration to its source — confirm against the tests that consume them.

lib.define("source0(Tensor x) -> Tensor")

@torch.library.register_fake("_torch_testing::source0", lib=lib)
def _(x):
    return x.clone()

lib.define("source1(Tensor x) -> Tensor")

def source1_fake(x):
    return x.clone()

torch.library.register_fake("_torch_testing::source1", source1_fake, lib=lib)

lib.define("source2(Tensor x) -> Tensor")

@torch.library.register_fake("_torch_testing::source2", lib=lib)
def _(x):
    return x.clone()

lib.define("source3(Tensor x) -> Tensor")

def source3_fake(x):
    return x.clone()

torch.library.register_fake("_torch_testing::source3", source3_fake, lib=lib)


@torch.library.custom_op("_torch_testing::source4", mutates_args=())
def source4(x: Tensor) -> Tensor:
    return x.clone()

@source4.register_fake
def _(x):
    return x.clone()

@torch.library.custom_op("_torch_testing::source5", mutates_args=())
def source5(x: Tensor) -> Tensor:
    return x.clone()

def source5_fake(x):
    return x.clone()

source5.register_fake(source5_fake)
infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/custom_tensor.py ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: ignore-errors
2
+
3
+ import torch
4
+ import torch.utils._pytree as pytree
5
+ from torch.utils._python_dispatch import return_and_correct_aliasing
6
+
7
+
8
+ # A simple tensor subclass that holds a tensor with custom metadata and custom method
9
+ class ConstantExtraMetadataTensor(torch.Tensor):
10
+ @staticmethod
11
+ def __new__(cls, elem):
12
+ shape = elem.shape
13
+ kwargs = {}
14
+ kwargs["strides"] = elem.stride()
15
+ kwargs["storage_offset"] = elem.storage_offset()
16
+ kwargs["device"] = elem.device
17
+ kwargs["layout"] = elem.layout
18
+ kwargs["requires_grad"] = elem.requires_grad
19
+ kwargs["dtype"] = elem.dtype
20
+ return torch.Tensor._make_wrapper_subclass(cls, shape, **kwargs)
21
+
22
+ def __init__(self, elem):
23
+ self.elem = elem
24
+ self.constant_attribute = 4
25
+
26
+ def __repr__(self):
27
+ inner_repr = repr(self.elem)
28
+ return f"CustomTensor({inner_repr})"
29
+
30
+ def __tensor_flatten__(self):
31
+ return ["elem"], self.constant_attribute
32
+
33
+ def add_constant(self, a):
34
+ self.constant_attribute += a
35
+
36
+ @staticmethod
37
+ def __tensor_unflatten__(inner_tensors, meta, outer_size, outer_stride):
38
+ assert meta is not None
39
+ elem = inner_tensors["elem"]
40
+ out = ConstantExtraMetadataTensor(elem)
41
+ out.constant_attribute = meta
42
+ return out
43
+
44
+ @classmethod
45
+ def __torch_dispatch__(cls, func, types, args, kwargs):
46
+ if kwargs is None:
47
+ kwargs = {}
48
+ args_inner = pytree.tree_map_only(
49
+ ConstantExtraMetadataTensor, lambda x: x.elem, args
50
+ )
51
+
52
+ kwargs_inner = pytree.tree_map_only(
53
+ ConstantExtraMetadataTensor, lambda x: x.elem, kwargs
54
+ )
55
+
56
+ out_inner = func(*args_inner, **kwargs_inner)
57
+ out_inner_flat, spec = pytree.tree_flatten(out_inner)
58
+ # for aten ops that return non-tensors, just assume that
59
+ # our cust inner tensors return the same value
60
+ out_flat = [
61
+ ConstantExtraMetadataTensor(o_inner)
62
+ if isinstance(o_inner, torch.Tensor)
63
+ else o_inner
64
+ for o_inner in out_inner_flat
65
+ ]
66
+ out = pytree.tree_unflatten(out_flat, spec)
67
+ return return_and_correct_aliasing(func, args, kwargs, out)
infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/dist_utils.py ADDED
@@ -0,0 +1,200 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: ignore-errors
2
+
3
+ import re
4
+ import sys
5
+ import time
6
+ from functools import partial, wraps
7
+ from typing import Tuple
8
+
9
+ import torch.distributed as dist
10
+ import torch.distributed.rpc as rpc
11
+ from torch.distributed.rpc import _rref_context_get_debug_info
12
+ from torch.testing._internal.common_utils import FILE_SCHEMA, TEST_WITH_TSAN
13
+
14
+
15
+ if not dist.is_available():
16
+ print("c10d not available, skipping tests", file=sys.stderr)
17
+ sys.exit(0)
18
+
19
+
20
+ INIT_METHOD_TEMPLATE = FILE_SCHEMA + "{file_name}"
21
+
22
+ def dist_init(
23
+ old_test_method=None,
24
+ setup_rpc: bool = True,
25
+ clean_shutdown: bool = True,
26
+ faulty_messages=None,
27
+ messages_to_delay=None,
28
+ ):
29
+ """
30
+ We use this decorator for setting up and tearing down state since
31
+ MultiProcessTestCase runs each `test*` method in a separate process and
32
+ each process just runs the `test*` method without actually calling
33
+ 'setUp' and 'tearDown' methods of unittest.
34
+
35
+ Note: pass the string representation of MessageTypes that should be used
36
+ with the faulty agent's send function. By default, all retriable messages
37
+ ("RREF_FORK_REQUEST", "RREF_CHILD_ACCEPT", "RREF_USER_DELETE",
38
+ "CLEANUP_AUTOGRAD_CONTEXT_REQ") will use the faulty send (this default is
39
+ set from faulty_rpc_agent_test_fixture.py).
40
+ """
41
+ # If we use dist_init without arguments (ex: @dist_init), old_test_method is
42
+ # appropriately set and we return the wrapper appropriately. On the other
43
+ # hand if dist_init has arguments (ex: @dist_init(clean_shutdown=False)),
44
+ # old_test_method is None and we return a functools.partial which is the real
45
+ # decorator that is used and as a result we recursively call dist_init with
46
+ # old_test_method and the rest of the arguments appropriately set.
47
+ if old_test_method is None:
48
+ return partial(
49
+ dist_init,
50
+ setup_rpc=setup_rpc,
51
+ clean_shutdown=clean_shutdown,
52
+ faulty_messages=faulty_messages,
53
+ messages_to_delay=messages_to_delay,
54
+ )
55
+
56
+ @wraps(old_test_method)
57
+ def new_test_method(self, *arg, **kwargs):
58
+ # Setting _ignore_rref_leak to make sure OwnerRRefs are properly deleted
59
+ # in tests.
60
+ import torch.distributed.rpc.api as api
61
+
62
+ api._ignore_rref_leak = False
63
+ self.worker_id = self.rank
64
+ self.setup_fault_injection(faulty_messages, messages_to_delay)
65
+
66
+ rpc_backend_options = self.rpc_backend_options
67
+ if setup_rpc:
68
+ if TEST_WITH_TSAN:
69
+ # TSAN runs much slower.
70
+ rpc_backend_options.rpc_timeout = rpc.constants.DEFAULT_RPC_TIMEOUT_SEC * 5
71
+ rpc.constants.DEFAULT_SHUTDOWN_TIMEOUT = 60
72
+
73
+ rpc.init_rpc(
74
+ name="worker%d" % self.rank,
75
+ backend=self.rpc_backend,
76
+ rank=self.rank,
77
+ world_size=self.world_size,
78
+ rpc_backend_options=rpc_backend_options,
79
+ )
80
+
81
+ return_value = old_test_method(self, *arg, **kwargs)
82
+
83
+ if setup_rpc:
84
+ rpc.shutdown(graceful=clean_shutdown)
85
+
86
+ return return_value
87
+
88
+ return new_test_method
89
+
90
+
91
def noop() -> None:
    """Do nothing; used as a trivial RPC target."""


def wait_until_node_failure(rank: int, expected_error_regex: str = ".*") -> str:
    """
    Block until an RPC to ``rank`` fails, indicating the node has died.

    Args:
        rank (int): Rank of the node expected to fail
        expected_error_regex (optional, str): Regex the exception message must
            match; lets callers wait for a specific failure, not just any.

    Returns:
        The stringified matching exception.
    """
    while True:
        try:
            rpc.rpc_sync(f"worker{rank}", noop, args=())
            time.sleep(0.1)
        except Exception as e:
            message = str(e)
            if re.search(pattern=expected_error_regex, string=message):
                return message
111
+
112
+
113
def wait_until_pending_futures_and_users_flushed(timeout: int = 20) -> None:
    """
    Poll the RRef context until it reports no pending futures or pending users.

    The RRef protocol holds forkIds in a map until the owner confirms them;
    that confirmation can race with test assertions (and ``to_here`` does not
    guarantee the confirmation was processed), so call this before asserting
    the map from ``_get_debug_info`` is empty.

    Raises:
        ValueError: if the maps do not drain within ``timeout`` seconds.
    """
    deadline = time.time() + timeout
    while True:
        debug_info = _rref_context_get_debug_info()
        num_pending_futures = int(debug_info["num_pending_futures"])
        num_pending_users = int(debug_info["num_pending_users"])
        if num_pending_futures == 0 and num_pending_users == 0:
            return
        time.sleep(0.1)
        if time.time() > deadline:
            raise ValueError(
                f"Timed out waiting to flush pending futures and users, "
                f"had {num_pending_futures} pending futures and {num_pending_users} pending users"
            )
137
+
138
+
139
def get_num_owners_and_forks() -> Tuple[str, str]:
    """Return (num_owner_rrefs, num_forks) from the RRef context debug info."""
    info = _rref_context_get_debug_info()
    return info["num_owner_rrefs"], info["num_forks"]
148
+
149
+
150
def wait_until_owners_and_forks_on_rank(
    num_owners: int, num_forks: int, rank: int, timeout: int = 20
) -> None:
    """
    Poll ``rank`` over RPC until it reports exactly ``num_owners`` OwnerRRefs
    and ``num_forks`` forks; used to ensure proper deletion of RRefs in tests.

    Raises:
        ValueError: if the counts are not reached within ``timeout`` seconds.
    """
    deadline = time.time() + timeout
    while True:
        owners, forks = rpc.rpc_sync(
            worker_name(rank), get_num_owners_and_forks, args=(), timeout=5
        )
        owners, forks = int(owners), int(forks)
        if (owners, forks) == (num_owners, num_forks):
            return
        time.sleep(1)
        if time.time() > deadline:
            raise ValueError(
                f"Timed out waiting {timeout} sec for {num_owners} owners and {num_forks} forks on rank,"
                f" had {owners} owners and {forks} forks"
            )
172
+
173
+
174
def initialize_pg(init_method, rank: int, world_size: int) -> None:
    """Lazily create a gloo process group (for tests that use ``dist.barrier``)."""
    if dist.is_initialized():
        return
    dist.init_process_group(
        backend="gloo",
        init_method=init_method,
        rank=rank,
        world_size=world_size,
    )
183
+
184
+
185
def worker_name(rank: int) -> str:
    """Return the canonical RPC worker name for ``rank``."""
    return "worker" + str(rank)
187
+
188
+
189
def get_function_event(function_events, partial_event_name):
    """
    Returns the first event that matches partial_event_name in the provided
    function_events. These function_events should be the output of
    torch.autograd.profiler.function_events().

    Args:
        function_events: function_events returned by the profiler.
        partial_event_name (str): partial key that the event was profiled with.
    """
    # Raises IndexError if no event name contains the given substring.
    event = [event for event in function_events if partial_event_name in event.name][0]  # noqa: RUF015
    return event
infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ # mypy: allow-untyped-defs
infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/__init__.py ADDED
@@ -0,0 +1,98 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+
3
+ import sys
4
+ from functools import wraps, partial
5
+
6
+ import torch
7
+ import torch.distributed as dist
8
+ from torch.distributed import rpc
9
+ from torch.testing._internal.common_distributed import (
10
+ MultiProcessTestCase,
11
+ TEST_SKIPS,
12
+ tp_transports,
13
+ )
14
+
15
+ TEST_GPU_NUM = 4
16
+
17
+ class ShardedTensorTestBase(MultiProcessTestCase):
18
+ @property
19
+ def world_size(self):
20
+ return TEST_GPU_NUM
21
+
22
+ def init_pg(self, backend="nccl"):
23
+ if backend not in ["nccl", "gloo", "mpi"]:
24
+ raise RuntimeError(f"Backend {backend} not supported!")
25
+
26
+ dist.init_process_group(
27
+ backend=backend,
28
+ world_size=self.world_size,
29
+ rank=self.rank,
30
+ init_method=f"file://{self.file_name}",
31
+ )
32
+
33
+ # set device for nccl pg for collectives
34
+ if backend == "nccl":
35
+ torch.cuda.set_device(self.rank)
36
+
37
+
38
+ def init_rpc(self):
39
+ rpc_backend_options = rpc.TensorPipeRpcBackendOptions(_transports=tp_transports())
40
+ rpc_backend_options.init_method = f"file://{self.file_name}"
41
+ for rank in range(self.world_size):
42
+ rpc_backend_options.set_device_map(
43
+ f"worker{rank}", {rank: self.rank, self.rank: rank}
44
+ )
45
+
46
+ rpc.init_rpc(
47
+ name="worker%d" % self.rank,
48
+ rank=self.rank,
49
+ world_size=self.world_size,
50
+ rpc_backend_options=rpc_backend_options,
51
+ )
52
+
53
+ def init_comms(self, init_rpc=True, backend="nccl"):
54
+ if init_rpc:
55
+ self.init_rpc()
56
+ self.init_pg(backend=backend)
57
+
58
+ def destroy_comms(self, destroy_rpc=True):
59
+ # Wait for all ranks to reach here before starting shutdown.
60
+ dist.barrier()
61
+
62
+ if destroy_rpc:
63
+ rpc.shutdown()
64
+ dist.destroy_process_group()
65
+
66
+ def setUp(self) -> None:
67
+ super().setUp()
68
+ self._spawn_processes()
69
+
70
+ def assert_sharded_tensor_equal(self, st1, st2):
71
+ st1_local_shards = st1.local_shards()
72
+ st2_local_shards = st2.local_shards()
73
+ self.assertEqual(len(st1_local_shards), len(st2_local_shards))
74
+ for i, st1_local_shard in enumerate(st1_local_shards):
75
+ self.assertEqual(st1_local_shard.tensor, st2_local_shards[i].tensor)
76
+ self.assertEqual(st1_local_shard.metadata, st2_local_shards[i].metadata)
77
+
78
+ self.assertEqual(st1.metadata(), st2.metadata())
79
+ self.assertEqual(st1.sharding_spec(), st2.sharding_spec())
80
+ self.assertEqual(len(st1.remote_shards()), len(st2.remote_shards()))
81
+
82
+ # wrapper to initialize comms (processgroup + rpc)
83
+ def with_comms(func=None, init_rpc=True, backend="nccl"):
84
+ if func is None:
85
+ return partial(
86
+ with_comms,
87
+ init_rpc=init_rpc,
88
+ backend=backend,
89
+ )
90
+
91
+ @wraps(func)
92
+ def wrapper(self, *args, **kwargs):
93
+ if backend == "nccl" and torch.cuda.device_count() < self.world_size:
94
+ sys.exit(TEST_SKIPS[f"multi-gpu-{self.world_size}"].exit_code)
95
+ self.init_comms(init_rpc=init_rpc, backend=backend)
96
+ func(self, *args, **kwargs)
97
+ self.destroy_comms(destroy_rpc=init_rpc)
98
+ return wrapper
infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (3.45 kB). View file
 
infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/__pycache__/_test_ops_common.cpython-310.pyc ADDED
Binary file (3.04 kB). View file
 
infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/_test_ops_common.py ADDED
@@ -0,0 +1,136 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+
3
+ import builtins
4
+
5
+ import torch
6
+ from torch.distributed._shard.sharding_spec import (
7
+ ChunkShardingSpec,
8
+ EnumerableShardingSpec,
9
+ ShardMetadata,
10
+ )
11
+ from torch.distributed._shard.sharding_spec._internals import (
12
+ get_chunked_dim_size,
13
+ get_split_size,
14
+ )
15
+
16
+
17
def generate_chunk_sharding_specs_for_test(sharding_dim):
    """Return three ChunkShardingSpecs over 4 CUDA ranks, covering the natural
    placement order plus two permuted orderings."""
    orderings = [
        [0, 1, 2, 3],
        # Test different ordering. (Case 1)
        [2, 3, 0, 1],
        # Test different ordering. (Case 2)
        [3, 0, 1, 2],
    ]
    return [
        ChunkShardingSpec(
            dim=sharding_dim,
            placements=[f"rank:{r}/cuda:{r}" for r in order],
        )
        for order in orderings
    ]
49
+
50
+
51
def generate_enumerable_sharding_specs_for_test():
    """Return one EnumerableShardingSpec: a 10x10 grid cut into four 5x5
    quadrants, one per CUDA rank."""
    offsets_and_ranks = [
        ([0, 0], 0),
        ([5, 0], 1),
        ([0, 5], 2),
        ([5, 5], 3),
    ]
    return [
        EnumerableShardingSpec(
            [
                ShardMetadata(
                    shard_offsets=offsets,
                    shard_sizes=[5, 5],
                    placement=f"rank:{r}/cuda:{r}",
                )
                for offsets, r in offsets_and_ranks
            ]
        )
    ]
78
+
79
+
80
def generate_local_weight_sharding_params_for_test(
    local_weight, sharded_dim, gpu_num, spec, rank
):
    """
    Shard the local weight based the given spec, so we can compare against
    the one from sharded tensor.

    Args:
        local_weight: weight matrix to be sharded.
        sharded_dim: The dimension which we shard on.
        gpu_num: number of ranks.
        spec: sharding spec.
        rank: # of cuda process.

    Returns:
        start_pos: start position of sharded weight on the given rank.
        chunk_size: chunk size of sharded weight on the given rank.
    """
    dim_size = local_weight.size(sharded_dim)
    split_size = get_split_size(dim_size, gpu_num)
    offset = 0
    start_pos = offset
    # Walk placements in spec order, accumulating offsets until we hit ``rank``.
    for idx, placement in enumerate(spec.placements):
        chunk_size = get_chunked_dim_size(dim_size, split_size, idx)
        if rank == placement.rank():
            start_pos = offset
            break
        offset += chunk_size
    return start_pos, chunk_size
109
+
110
+
111
def clone_module_parameter(module, param_name):
    """
    Clone a parameter from a given existing module.

    Args:
        module (:class:`torch.nn.Module`): Module whose parameter needs to be cloned.
        param_name (str): Name of the parameter of ``module`` that needs to be cloned.

    Returns: cloned tensor as :class:`torch.nn.Parameter`.
    """
    source = getattr(module, param_name)
    # detach() drops the autograd history before copying the data.
    return torch.nn.Parameter(source.detach().clone())
123
+
124
def gen_binary_op_func(python_op, inplace=False):
    """Compile and return ``f(lhs, rhs)`` applying ``python_op``: either a
    ``torch.*`` callable name, an in-place operator, or a plain operator."""
    if "torch" in python_op:
        body = f'    return {python_op}(lhs, rhs)\n'
    elif inplace:
        body = f'    lhs {python_op}= rhs\n    return lhs\n'
    else:
        body = f'    return lhs {python_op} rhs\n'
    namespace = {'torch': torch}
    builtins.exec('def f(lhs, rhs):\n' + body, namespace)
    return namespace["f"]
infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/sharded_tensor/_test_st_common.py ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+
3
+ import copy
4
+ import random
5
+ import torch
6
+ from torch.distributed._shard import sharded_tensor
7
+
8
+ from torch.distributed._shard.sharding_spec import (
9
+ ChunkShardingSpec,
10
+ )
11
+
12
+ PLACEMENTS = [
13
+ "rank:0/cuda:0",
14
+ "rank:1/cuda:1",
15
+ "rank:2/cuda:2",
16
+ "rank:3/cuda:3",
17
+ ]
18
+
19
+ DEFAULT_GPU_NUM = 4
20
+
21
+
22
+ def _chunk_sharding_specs_list_for_test(sharding_dims, seed=0):
23
+ spec_list = []
24
+ for i in range(len(sharding_dims)):
25
+ random.Random(seed + i).shuffle(PLACEMENTS)
26
+ spec_list.append(
27
+ ChunkShardingSpec(
28
+ dim=sharding_dims[i],
29
+ placements=copy.deepcopy(PLACEMENTS),
30
+ )
31
+ )
32
+ return spec_list
33
+
34
class MyShardedModel2(torch.nn.Module):
    """Module holding one optional (10, 20) sharded tensor plus a dense parameter."""

    def __init__(
        self,
        spec=None,
        group=None,
        init_rrefs=True
    ) -> None:
        super().__init__()
        self.sharded_tensor2 = (
            sharded_tensor.rand(
                spec, 10, 20, process_group=group, init_rrefs=init_rrefs
            )
            if spec is not None
            else None
        )
        self.random_tensor2 = torch.nn.Parameter(torch.rand(2, 2))
49
+
50
+
51
class MyShardedModel1(torch.nn.Module):
    """Module with an optional (10, 20) sharded tensor, a dense parameter, and
    a MyShardedModel2 submodule built from the same spec."""

    def __init__(
        self,
        spec=None,
        group=None,
        init_rrefs=True
    ) -> None:
        super().__init__()
        self.sharded_tensor1 = (
            sharded_tensor.rand(
                spec, 10, 20, process_group=group, init_rrefs=init_rrefs
            )
            if spec is not None
            else None
        )
        self.random_tensor1 = torch.nn.Parameter(torch.rand(2, 2))
        self.submodule = MyShardedModel2(spec, group, init_rrefs)
infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/_shard/test_common.py ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+
3
+ import torch
4
+ import torch.nn as nn
5
+
6
+ from torch.distributed._shard.sharded_tensor import ShardedTensor
7
+
8
+
9
class SimpleMegatronLM(nn.Module):
    """Two-layer toy model (Linear -> GELU -> Linear) used in Megatron-style
    sharding tests; its weights may later be replaced by ShardedTensors."""

    def __init__(self, linear_size, rank=None, dtype=torch.float32):
        super().__init__()
        self.fc1 = nn.Linear(*linear_size[0], dtype=dtype)
        self.gelu = nn.GELU()
        self.fc2 = nn.Linear(*linear_size[1], dtype=dtype)
        if rank is not None:
            self.fc1.cuda(rank)
            self.fc2.cuda(rank)

    def forward(self, inp):
        hidden = self.gelu(self.fc1(inp))
        return self.fc2(hidden)

    def _local(self, param):
        # Unwrap to the local shard when the weight has been sharded.
        return param.local_tensor() if isinstance(param, ShardedTensor) else param

    def get_weights(self):
        return (self._local(self.fc1.weight), self._local(self.fc2.weight))

    def get_biases(self):
        return (self.fc1.bias, self.fc2.bias)

    def get_weight_grads(self):
        return (self.fc1.weight.grad, self.fc2.weight.grad)

    def get_bias_grads(self):
        return (self.fc1.bias.grad, self.fc2.bias.grad)
infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/checkpoint_utils.py ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+
3
+ # Copyright (c) Meta Platforms, Inc. and affiliates
4
+
5
+ import os
6
+ import shutil
7
+ import tempfile
8
+ from functools import wraps
9
+ from typing import Any, Callable, Dict, Optional, Tuple
10
+
11
+ import torch.distributed as dist
12
+
13
+
14
def with_temp_dir(
    func: Optional[Callable] = None,
) -> Optional[Callable]:
    """
    Wrapper to initialize temp directory for distributed checkpoint.

    In a distributed run, rank 0 creates the directory and broadcasts its path
    so every rank shares the same ``self.temp_dir``; otherwise a process-local
    directory is used. The directory is removed (best-effort) after the
    wrapped test returns or raises.

    Fix: the original ``finally`` had an ``if``/``else`` whose two branches
    were identical ``shutil.rmtree`` calls; collapsed to a single call.
    """
    assert func is not None

    @wraps(func)
    def wrapper(self, *args: Tuple[object], **kwargs: Dict[str, Any]) -> None:
        if dist.is_initialized():
            # Only create temp_dir when rank is 0
            if dist.get_rank() == 0:
                temp_dir = tempfile.mkdtemp()
                print(f"Using temp directory: {temp_dir}")
            else:
                temp_dir = ""
            object_list = [temp_dir]

            # Broadcast temp_dir to all the other ranks
            os.sync()
            dist.broadcast_object_list(object_list)
            self.temp_dir = object_list[0]
            os.sync()
        else:
            temp_dir = tempfile.mkdtemp()
            print(f"No process group initialized, using temp directory: {temp_dir}")
            self.temp_dir = temp_dir

        try:
            func(self, *args, **kwargs)
        finally:
            # Every rank may attempt removal; ignore_errors makes it safe.
            shutil.rmtree(self.temp_dir, ignore_errors=True)

    return wrapper
infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/distributed_test.py ADDED
The diff for this file is too large to render. See raw diff
 
infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/fake_pg.py ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+
3
+ import torch.distributed as dist
4
+
5
+ from torch._C._distributed_c10d import (
6
+ FakeProcessGroup,
7
+ )
8
+
9
+
10
class FakeStore(dist.Store):
    """
    A fake key-value store used simply for initialization of the fake
    process group; one can use either FakeStore or HashStore.
    """
15
+
16
+
17
def _create_fake_pg(prefix_store, rank, world_size, timeout):
    """
    A fake process group (not related to FakeTensor) is a process group which
    doesn't actually do any communication, it just hallucinates some
    communication. You can run a single rank with a fake process group
    without needing multiple processes (simulates per-rank behavior)

    NOTE: This is not a real process group, and it would produce wrong results
    for every collective. It should be used as a convenient tool when playing
    with distributed but don't care about the actual data.
    """
    # prefix_store and timeout are accepted to match the backend-constructor
    # signature but are intentionally unused.
    return FakeProcessGroup(rank, world_size)
29
+
30
+
31
+ dist.Backend.register_backend("fake", _create_fake_pg, devices=['cpu', 'cuda'])
infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/multi_threaded_pg.py ADDED
@@ -0,0 +1,543 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+
3
+ import sys
4
+ import threading
5
+ from dataclasses import dataclass
6
+ from typing import Dict, List, Optional, Tuple, Union
7
+ from functools import partial, reduce
8
+
9
+ import torch
10
+ import torch.distributed as dist
11
+ import weakref
12
+ from torch._C._distributed_c10d import (
13
+ _create_work_from_future,
14
+ AllgatherOptions,
15
+ AllreduceOptions,
16
+ AllToAllOptions,
17
+ BarrierOptions,
18
+ BroadcastOptions,
19
+ ReduceScatterOptions,
20
+ ScatterOptions,
21
+ Store,
22
+ ReduceOp,
23
+ )
24
+ from torch.distributed.distributed_c10d import _CollOp, _store_based_barrier, P2POp
25
+ from torch.futures import Future
26
+ from torch.utils import _pytree as pytree
27
+
28
+ """
29
+ TODO:
30
+ Lots of missing collectives.
31
+ Collectives validation.
32
+ Make timeout robust by making collectives respect the test deadline.
33
+ Make tests robust by making collectives interruptible.
34
+ We need some synchronization around cleanup to ensure that timedout ranks don't cause spurious failures.
35
+
36
+ """
37
+
38
+
39
def flatten_list(lst):
    # Flatten an arbitrarily nested container into a flat list of its
    # leaves, in pytree traversal order.
    return pytree.tree_leaves(lst)
41
+
42
+
43
def ret_work(ret):
    # Wrap `ret` in an already-completed c10d Work object, mimicking a
    # collective that finished synchronously.
    fut = Future()
    fut.set_result(ret)
    return _create_work_from_future(fut)
47
+
48
def binop_reduce(tensors, op):
    """Reduce a list of same-shaped tensors with a torch reduction op along dim 0."""
    reduced = op(torch.stack(tensors), dim=0)
    # torch.min/torch.max with dim= return a (values, indices) namedtuple,
    # whereas sum/mean/prod return a plain tensor.
    return reduced if isinstance(reduced, torch.Tensor) else reduced.values
54
+
55
def bitwise_reduce(tensors, op):
    """Fold a bitwise op (and/or/xor) pairwise across a list of integer tensors."""
    acc = tensors[0]
    for t in tensors[1:]:
        acc = op(acc, t)
    return acc
57
+
58
# Map from c10d ReduceOp to a Python implementation operating on a list of
# tensors (one per rank): arithmetic ops reduce over a stacked dim-0,
# bitwise ops are folded pairwise.
_reduce_ops = {
    ReduceOp.SUM: partial(binop_reduce, op=torch.sum),
    ReduceOp.AVG: partial(binop_reduce, op=torch.mean),
    ReduceOp.PRODUCT: partial(binop_reduce, op=torch.prod),
    ReduceOp.MIN: partial(binop_reduce, op=torch.min),
    ReduceOp.MAX: partial(binop_reduce, op=torch.max),
    ReduceOp.BAND: partial(bitwise_reduce, op=torch.bitwise_and),
    ReduceOp.BOR: partial(bitwise_reduce, op=torch.bitwise_or),
    ReduceOp.BXOR: partial(bitwise_reduce, op=torch.bitwise_xor),
}
68
+
69
class AllToAll:
    """All-to-all over per-rank (output_tensor_list, input_tensor_list) pairs."""

    @torch.no_grad()
    def work(self, data):
        # data[r] = (output_tensor_list, input_tensor_list) for rank r.
        # Rank s sends input_tensor_list[d] to rank d, which stores it at
        # output_tensor_list[s].
        for dest_rank, (out_list, _) in enumerate(data):
            for src_rank, (_, in_list) in enumerate(data):
                out_list[src_rank].copy_(in_list[dest_rank])
78
+
79
class AllToAllBase:
    # All-to-all over per-rank flat buffers:
    # data[r] = (output_buffer, input_buffer, output_split_sizes, input_split_sizes).
    # Split sizes partition each buffer along dim 0 into world_size segments.
    @torch.no_grad()
    def work(self, data):
        world_size = len(data)
        for dest_rank in range(world_size):
            output_buffer, _, output_split_sizes, _ = data[dest_rank]

            # Segment boundaries of dest's output buffer, one segment per source.
            output_indexes = self._size_cumsum(output_buffer.size(0), output_split_sizes, world_size)

            for src_rank in range(world_size):
                _, input_buffer, _, input_split_sizes = data[src_rank]
                input_indexes = self._size_cumsum(input_buffer.size(0), input_split_sizes, world_size)

                # Copy src's segment addressed to dest into dest's segment
                # reserved for src.
                output_buffer[output_indexes[src_rank]:output_indexes[src_rank + 1]].copy_(
                    input_buffer[input_indexes[dest_rank]:input_indexes[dest_rank + 1]]
                )

    def _size_cumsum(self, buf_size: int, sizes: Union[torch.Tensor, List[int], None], world_size: int) -> torch.Tensor:
        # Return an int64 tensor of world_size + 1 cumulative offsets
        # [0, s0, s0+s1, ...]. Missing/empty sizes mean an even split
        # (buf_size must then be divisible by world_size for full coverage).
        if sizes is None or len(sizes) == 0:
            sizes = torch.full(
                (world_size,), buf_size // world_size, dtype=torch.int64
            )
        if not isinstance(sizes, torch.Tensor):
            sizes = torch.tensor(sizes, dtype=torch.int64)
        assert sizes.dtype == torch.int64
        sizes = torch.cumsum(
            torch.cat(
                (
                    torch.tensor([0], dtype=torch.int64, device=sizes.device), sizes
                ),
                dim=0
            ),
            dim=0
        )
        return sizes
114
+
115
class AllReduce:
    # All-reduce: every rank's i-th tensor is replaced by the reduction of
    # all ranks' i-th tensors, using the implementation from _reduce_ops.
    def __init__(self, op):
        if op.op not in _reduce_ops:
            raise NotImplementedError(
                f"AllReduce op {op.op} not supported on multithreaded pg for now."
            )
        self.op = op.op

    @torch.no_grad()
    def work(self, data):
        # data[r] is rank r's tensor list; reduce position-wise.
        for i in range(len(data[0])):
            tensors = []
            # use rank0 as the device for sum
            rank_0_device = data[0][i].device
            # collect all data to the list and make them
            # all on rank 0 device
            for src_rank in range(0, len(data)):
                tensors.append(data[src_rank][i].to(rank_0_device))

            # now mimic reduce across all ranks
            res = _reduce_ops[self.op](tensors)

            # copy all the reduced value to each rank
            for src_rank in range(len(data)):
                data[src_rank][i].copy_(res.to(data[src_rank][i].device))
140
+
141
+
142
class AllGather:
    """All-gather: every rank ends up with every rank's single input tensor."""

    @torch.no_grad()
    def work(self, data):
        # data[r] = ([output_tensor_list], [input_tensor]) for rank r.
        for src_rank, (_, in_tensors) in enumerate(data):
            # Can't handle all_gather with multiple tensors
            assert len(in_tensors) == 1
            contribution = in_tensors[0]

            # Write src's tensor into slot src_rank of every rank's output list.
            for out_lists, _ in data:
                out_lists[0][src_rank].copy_(contribution)
154
+
155
+
156
class Scatter:
    """Scatter: the source rank distributes one tensor to each rank."""

    def __init__(self, src):
        self.src = src

    @torch.no_grad()
    def work(self, data):
        # Only the source rank's input list is consulted.
        source_inputs = data[self.src][1]
        # Can't handle scatter with multiple input tensor list
        assert len(source_inputs) == 1
        shards = source_inputs[0]

        for rank, (outputs, _) in enumerate(data):
            # Can't handle scatter with multiple output tensor
            assert len(outputs) == 1
            outputs[0].copy_(shards[rank])
173
+
174
+
175
class Gather:
    """Gather: the destination rank collects one tensor from every rank."""

    def __init__(self, dst):
        self.dst = dst

    @torch.no_grad()
    def work(self, data):
        # Can't handle gather with multiple tensor lists
        assert len(data[self.dst][0]) == 1
        collected = data[self.dst][0][0]

        for rank, (_, inputs) in enumerate(data):
            # Can't handle gather with multiple tensor lists
            assert len(inputs) == 1
            collected[rank].copy_(inputs[0])
190
+
191
class ReduceScatter:
    """Reduce-scatter: slot i of every rank's scatter list is reduced onto rank i."""

    def __init__(self, op):
        if op != dist.ReduceOp.SUM and op != dist.ReduceOp.AVG:
            raise NotImplementedError(f"ReduceScatter does not support {op}")
        self.op = op

    @torch.no_grad()
    def work(self, data):
        world_size = len(data)
        # reduction_started[i]: whether rank i's output already holds a first
        # contribution (subsequent ones are accumulated with add_).
        reduction_started = [False] * world_size

        for _, scatter_lists in data:
            # Can't handle reduce_scatter with multiple scatter list
            assert len(scatter_lists) == 1
            contributions = scatter_lists[0]
            for i, contribution in enumerate(contributions):
                outputs_i = data[i][0]
                # Can't handle reduce_scatter with multiple output tensor
                assert len(outputs_i) == 1
                target = outputs_i[0]
                moved = contribution.to(target.device)
                if reduction_started[i]:
                    target.add_(moved)
                else:
                    target.copy_(moved)
                    reduction_started[i] = True

        if self.op == dist.ReduceOp.AVG:
            for outputs, _ in data:
                outputs[0] /= world_size
218
+
219
+
220
class Broadcast:
    # Broadcast: copy the source rank's (flattened) tensors into every
    # rank's corresponding slots, position by position.
    def __init__(self, src):
        self.src = src

    @torch.no_grad()
    def work(self, data):
        in_tensor_list = flatten_list(data[self.src])
        for i in range(len(data)):
            out_tensor_list = flatten_list(data[i])
            for j in range(len(in_tensor_list)):
                out_tensor_list[j].copy_(in_tensor_list[j])
231
+
232
+
233
class Collective:
    """
    Rendezvous for one collective call across the threads ("ranks") of a
    multithreaded process group. Every rank calls join(); rank 0 waits for
    all ranks to arrive, performs the actual data movement via the supplied
    collective implementation, then releases the other ranks.
    """

    def __init__(self, world_size, collective, pg):
        self._world_size = world_size
        self._collective = collective

        # _start_cond gates "all ranks have arrived";
        # _done_cond gates "rank 0 has finished the data movement".
        self._start_cond = threading.Condition()
        self._done_cond = threading.Condition()

        # Per-rank payloads, filled in by join().
        self._data = [None] * world_size
        self._count = 0
        self._done = False

        self._pg = pg

    def join(self, rank, data):
        with self._start_cond:
            self._data[rank] = data
            self._count += 1

            # notify rank 0
            if self._count == self._world_size:
                if rank > 0:
                    self._start_cond.notify()

            if rank == 0:
                self._start_cond.wait_for(
                    lambda: self._count == self._world_size or self._pg._terminate.is_set()
                )
                # SystemExit is not a subclass of Exception but BaseException
                # and can be distinguished from normal exception raised from program errors
                # so that we can hide it from the exception queue
                if self._pg._terminate.is_set():
                    sys.exit("Test termination event occurs.")

        with self._done_cond:
            # wait for rank 0 to finish
            if rank > 0:
                self._done_cond.wait_for(lambda: self._done or self._pg._terminate.is_set())
                if self._pg._terminate.is_set():
                    sys.exit("Test termination event occurs.")
            else:
                # copy data around
                self._collective.work(self._data)
                self._done = True
                self._done_cond.notify_all()
        # Return an already-completed Work wrapping this rank's own payload.
        return ret_work(data)
279
+
280
+
281
class ProcessLocalGroup(dist.ProcessGroup):
    """
    A c10d ProcessGroup backend where each "rank" is a thread of the current
    process; collectives are emulated with direct tensor copies via the
    collective implementation classes above, synchronized by Collective.
    """

    # Class-level state shared by all ranks (threads).
    _coll_lock = threading.Lock()
    _cur_coll_on_pgs = {}

    # Set to abort all in-flight collectives (see exception_handle).
    _terminate = threading.Event()

    @classmethod
    def _start_coll(cls, collective, pg):
        with cls._coll_lock:
            # pg_name is unique, we use that to record the mapping between pg and collective
            if pg.pg_name not in cls._cur_coll_on_pgs:
                cls._cur_coll_on_pgs[pg.pg_name] = Collective(pg.size(), collective, cls)
            return cls._cur_coll_on_pgs[pg.pg_name]

    @classmethod
    def _end_coll(cls, collective, pg):
        # This is racily called by all ranks, so only one will work
        with cls._coll_lock:
            if pg.pg_name in cls._cur_coll_on_pgs and cls._cur_coll_on_pgs[pg.pg_name] == collective:
                cls._cur_coll_on_pgs.pop(pg.pg_name)

    @classmethod
    def exception_handle(cls, exc):
        # Wake every rank blocked in a collective so it can observe
        # _terminate and exit.
        cls._terminate.set()
        for coll in cls._cur_coll_on_pgs.values():
            with coll._start_cond:
                coll._start_cond.notify()
            with coll._done_cond:
                coll._done_cond.notify_all()

    @classmethod
    def reset(cls):
        with cls._coll_lock:
            cls._cur_coll_on_pgs = {}
            cls._terminate.clear()

    def alltoall_base(
        self,
        output_buffer: torch.Tensor,
        input_buffer: torch.Tensor,
        output_split_sizes: Optional[List[int]],
        input_split_sizes: Optional[List[int]],
        opts=AllToAllOptions()
    ) -> torch.Tensor:
        coll = ProcessLocalGroup._start_coll(AllToAllBase(), self)
        res = coll.join(self._rank, (output_buffer, input_buffer, output_split_sizes, input_split_sizes))
        ProcessLocalGroup._end_coll(coll, self)
        return res

    def alltoall(self, output_tensor_list, input_tensor_list, opts=AllToAllOptions()):
        coll = ProcessLocalGroup._start_coll(AllToAll(), self)
        res = coll.join(self._rank, (output_tensor_list, input_tensor_list))
        ProcessLocalGroup._end_coll(coll, self)
        return res

    def allreduce(self, tensor_list, opts=AllreduceOptions()):
        coll = ProcessLocalGroup._start_coll(AllReduce(opts.reduceOp), self)
        res = coll.join(self._rank, tensor_list)
        ProcessLocalGroup._end_coll(coll, self)
        return res

    def allreduce_coalesced(self, tensor_list, opts=AllreduceOptions()):
        # Same implementation as allreduce: the emulation reduces the whole
        # list position-wise anyway.
        coll = ProcessLocalGroup._start_coll(AllReduce(opts.reduceOp), self)
        res = coll.join(self._rank, tensor_list)
        ProcessLocalGroup._end_coll(coll, self)
        return res

    def barrier(self, opts=BarrierOptions()):
        # Implemented as a trivial allreduce; `opts` is ignored.
        return self.allreduce(tensor_list=[torch.ones(1)])

    def allgather(self, output_tensors, input_tensor, opts=AllgatherOptions()):
        coll = ProcessLocalGroup._start_coll(AllGather(), self)
        res = coll.join(self._rank, (output_tensors, input_tensor))
        ProcessLocalGroup._end_coll(coll, self)
        return res

    def _allgather_base(self, output_tensor, input_tensor, opts=AllgatherOptions()):
        # Express the flat-tensor variant in terms of allgather by viewing the
        # output as world_size chunks (views share storage with output_tensor).
        tensor_list = list(torch.chunk(output_tensor, self._world_size))
        return self.allgather([tensor_list], [input_tensor], opts)

    def broadcast(self, tensor_list, opts=BroadcastOptions()):
        coll = ProcessLocalGroup._start_coll(Broadcast(opts.rootRank), self)
        res = coll.join(self._rank, tensor_list)
        ProcessLocalGroup._end_coll(coll, self)
        return res

    def scatter(self, output_tensors, input_tensors, opts=ScatterOptions()):
        coll = ProcessLocalGroup._start_coll(Scatter(opts.rootRank), self)
        res = coll.join(self._rank, (output_tensors, input_tensors))
        ProcessLocalGroup._end_coll(coll, self)
        return res

    def gather(self, output_tensors, input_tensors, opts=ScatterOptions()):
        coll = ProcessLocalGroup._start_coll(Gather(opts.rootRank), self)
        res = coll.join(self._rank, (output_tensors, input_tensors))
        ProcessLocalGroup._end_coll(coll, self)
        return res

    def reduce_scatter(self, output_tensor, scatter_list, opts=ReduceScatterOptions()):
        coll = ProcessLocalGroup._start_coll(ReduceScatter(opts.reduceOp), self)
        res = coll.join(self._rank, (output_tensor, scatter_list))
        ProcessLocalGroup._end_coll(coll, self)
        return res

    def _reduce_scatter_base(self, output_tensor, input_tensor, opts=ReduceScatterOptions()):
        tensor_list = list(torch.chunk(input_tensor, self._world_size))
        return self.reduce_scatter([output_tensor], [tensor_list], opts)

    def reduce_scatter_tensor_coalesced(self, output_tensors, input_tensors, opts=ReduceScatterOptions()):
        # NOTE(review): only the last work is returned; earlier works are
        # waited on here, which is safe since join() is synchronous.
        works = [
            self._reduce_scatter_base(output_tensor, input_tensor, opts)
            for output_tensor, input_tensor
            in zip(output_tensors, input_tensors)
        ]
        for work in works[:-1]:
            work.wait()
        return works[-1]

    def allgather_into_tensor_coalesced(self, output_tensor_list, input_tensor_list, opts=AllgatherOptions()):
        # Returns the work of the final allgather only; each _allgather_base
        # completes synchronously before the next starts.
        res = None
        for o_t, i_t in zip(output_tensor_list, input_tensor_list):
            res = self._allgather_base(o_t, i_t)
        return res

    def __init__(self, rank, world_size):
        super().__init__(rank, world_size)
        self._rank = rank
        self._world_size = world_size
        world = dist.distributed_c10d._world
        if isinstance(world, ThreadLocalWorld):
            world = world._get_world()
        # Weak reference so the pg does not keep the world state alive.
        self._world = weakref.ref(world)
        # Ranks share one process, so autograd multithreading is disabled.
        self._ctx = torch.autograd.set_multithreading_enabled(False)

    def size(self):
        return self._world_size

    @property
    def pg_name(self):
        """
        return the global registered name of the current pg in the world
        """
        return self._world().pg_names[self]

    @property
    def group_name(self):
        return self.pg_name

    def getBackendName(self):
        return "threaded"

    def __repr__(self):
        return f"ThreadedPG world_size:{self._world_size} rank:{self._rank}"
434
+
435
+
436
def _create_threaded_pg(prefix_store, rank, world_size, timeout):
    """Backend constructor registered for the "threaded" backend."""
    pg = ProcessLocalGroup(rank, world_size)
    # https://github.com/pytorch/pytorch/pull/103033 changed store based barrier to optional
    # When device mesh involves sub groups while store based barrier is not enabled in c10d,
    # even though threaded pg actual collectives are assumed to be single threaded,
    # different threads may be initializing different groups,
    # leading to race conditions.
    # For example, if we have a mesh of [[0, 1], [2, 3]], the sub groups
    # (dim 0 and 1) would be initialized in different threads independently.
    # In this case we can no longer rely on class or global variables
    # but have to rely on store based barrier to make sure each group
    # is ready separately before we can invoke collectives in any of the groups.

    # the prefix store is already per group so we pass an empty name here
    _store_based_barrier(rank, prefix_store, "", world_size, timeout)
    return pg
452
+
453
+
454
+ dist.Backend.register_backend("threaded", _create_threaded_pg, devices=["cpu", "cuda"])
455
+
456
+
457
@dataclass
class WorldData:
    # Mirror of the module-level world state kept by
    # torch.distributed.distributed_c10d, held per-thread by ThreadLocalWorld.
    default_pg: dist.ProcessGroup
    pg_map: Dict[dist.ProcessGroup, Tuple[str, Optional[Store]]]
    pg_names: Dict[dist.ProcessGroup, str]
    pg_group_ranks: Dict[dist.ProcessGroup, Dict[int, int]]
    pg_backend_config: Dict[dist.ProcessGroup, str]
    group_count: int
    tags_to_pg: Dict[str, List[dist.ProcessGroup]]
    pg_to_tag: Dict[dist.ProcessGroup, str]
    pg_coalesce_state: Dict[dist.ProcessGroup, List[Union[_CollOp, P2POp]]]
    pg_default_device: Dict[dist.ProcessGroup, torch.device]
469
+
470
+
471
class ThreadLocalWorld:
    """
    Drop-in replacement for distributed_c10d's world object that stores a
    separate WorldData per thread, so each thread (simulated rank) sees its
    own c10d bookkeeping state.
    """

    _world = threading.local()

    def _get_world(self) -> WorldData:
        # Lazily create this thread's WorldData on first access.
        if not hasattr(ThreadLocalWorld._world, "world"):
            ThreadLocalWorld._world.world = WorldData(None, {}, {}, {}, {}, 0, {}, {}, {}, {})
        return ThreadLocalWorld._world.world

    @property
    def default_pg(self):
        return self._get_world().default_pg

    @default_pg.setter
    def default_pg(self, value):
        self._get_world().default_pg = value

    @property
    def pg_map(self):
        return self._get_world().pg_map

    @property
    def pg_names(self):
        return self._get_world().pg_names

    @property
    def pg_group_ranks(self):
        return self._get_world().pg_group_ranks

    @property
    def pg_backend_config(self):
        return self._get_world().pg_backend_config

    @property
    def group_count(self) -> int:
        return self._get_world().group_count

    @group_count.setter
    def group_count(self, value):
        self._get_world().group_count = value

    @property
    def tags_to_pg(self):
        return self._get_world().tags_to_pg

    @property
    def pg_to_tag(self):
        return self._get_world().pg_to_tag

    @property
    def pg_coalesce_state(self) -> Dict[dist.ProcessGroup, List[Union[_CollOp, P2POp]]]:
        return self._get_world().pg_coalesce_state

    @property
    def pg_default_device(self) -> Dict[dist.ProcessGroup, torch.device]:
        return self._get_world().pg_default_device
526
+
527
+
528
# State saved by _install_threaded_pg: the original c10d world object, and
# the context manager that disabled autograd multithreading.
_old_pg_world = None
_ctx_manager = None
530
+
531
+
532
def _install_threaded_pg():
    """
    Swap c10d's global world state for a ThreadLocalWorld so each thread can
    act as its own rank, and disable autograd multithreading. Returns the
    installed world; undo with _uninstall_threaded_pg().
    """
    global _old_pg_world
    global _ctx_manager
    _old_pg_world = dist.distributed_c10d._world
    dist.distributed_c10d._world = ThreadLocalWorld()
    _ctx_manager = torch.autograd.set_multithreading_enabled(False)

    return dist.distributed_c10d._world
540
+
541
+
542
def _uninstall_threaded_pg():
    # Restore the world object saved by _install_threaded_pg.
    dist.distributed_c10d._world = _old_pg_world
infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/__init__.py ADDED
File without changes
infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/api/__init__.py ADDED
File without changes
infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/nn/api/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (200 Bytes). View file
 
infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc_utils.py ADDED
@@ -0,0 +1,181 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: allow-untyped-defs
2
+
3
+ import os
4
+ import sys
5
+ import unittest
6
+ from typing import Dict, List, Type
7
+
8
+ from torch.testing._internal.common_distributed import MultiProcessTestCase
9
+ from torch.testing._internal.common_utils import (
10
+ TEST_WITH_DEV_DBG_ASAN,
11
+ find_free_port,
12
+ IS_SANDCASTLE,
13
+ )
14
+ from torch.testing._internal.distributed.ddp_under_dist_autograd_test import (
15
+ CudaDdpComparisonTest,
16
+ DdpComparisonTest,
17
+ DdpUnderDistAutogradTest,
18
+ )
19
+ from torch.testing._internal.distributed.nn.api.remote_module_test import (
20
+ CudaRemoteModuleTest,
21
+ RemoteModuleTest,
22
+ ThreeWorkersRemoteModuleTest,
23
+ )
24
+ from torch.testing._internal.distributed.rpc.dist_autograd_test import (
25
+ DistAutogradTest,
26
+ CudaDistAutogradTest,
27
+ FaultyAgentDistAutogradTest,
28
+ TensorPipeAgentDistAutogradTest,
29
+ TensorPipeCudaDistAutogradTest
30
+ )
31
+ from torch.testing._internal.distributed.rpc.dist_optimizer_test import (
32
+ DistOptimizerTest,
33
+ )
34
+ from torch.testing._internal.distributed.rpc.jit.dist_autograd_test import (
35
+ JitDistAutogradTest,
36
+ )
37
+ from torch.testing._internal.distributed.rpc.jit.rpc_test import JitRpcTest
38
+ from torch.testing._internal.distributed.rpc.jit.rpc_test_faulty import (
39
+ JitFaultyAgentRpcTest,
40
+ )
41
+ from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
42
+ RpcAgentTestFixture,
43
+ )
44
+ from torch.testing._internal.distributed.rpc.faulty_agent_rpc_test import (
45
+ FaultyAgentRpcTest,
46
+ )
47
+ from torch.testing._internal.distributed.rpc.rpc_test import (
48
+ CudaRpcTest,
49
+ RpcTest,
50
+ TensorPipeAgentRpcTest,
51
+ TensorPipeAgentCudaRpcTest,
52
+ )
53
+ from torch.testing._internal.distributed.rpc.examples.parameter_server_test import ParameterServerTest
54
+ from torch.testing._internal.distributed.rpc.examples.reinforcement_learning_rpc_test import (
55
+ ReinforcementLearningRpcTest,
56
+ )
57
+
58
+
59
+ def _check_and_set_tcp_init():
60
+ # if we are running with TCP init, set main address and port
61
+ # before spawning subprocesses, since different processes could find
62
+ # different ports.
63
+ use_tcp_init = os.environ.get("RPC_INIT_WITH_TCP", None)
64
+ if use_tcp_init == "1":
65
+ os.environ["MASTER_ADDR"] = '127.0.0.1'
66
+ os.environ["MASTER_PORT"] = str(find_free_port())
67
+
68
+ def _check_and_unset_tcp_init():
69
+ use_tcp_init = os.environ.get("RPC_INIT_WITH_TCP", None)
70
+ if use_tcp_init == "1":
71
+ del os.environ["MASTER_ADDR"]
72
+ del os.environ["MASTER_PORT"]
73
+
74
+ # The tests for the RPC module need to cover multiple possible combinations:
75
+ # - different aspects of the API, each one having its own suite of tests;
76
+ # - different agents (ProcessGroup, TensorPipe, ...);
77
+ # To avoid a combinatorial explosion in code size, and to prevent forgetting to
78
+ # add a combination, these are generated automatically by the code in this file.
79
+ # Here, we collect all the test suites that we need to cover.
80
+ # We then have one separate file for each agent, from which
81
+ # we call the generate_tests function of this file, passing to it a fixture for
82
+ # the agent, which then gets mixed-in with each test suite.
83
+
84
@unittest.skipIf(
    TEST_WITH_DEV_DBG_ASAN, "Skip ASAN as torch + multiprocessing spawn have known issues"
)
class SpawnHelper(MultiProcessTestCase):
    # Base class for generated RPC tests: sets TCP-init env vars (if enabled)
    # and spawns one subprocess per rank in setUp, undoing the env changes in
    # tearDown.
    def setUp(self):
        super().setUp()
        _check_and_set_tcp_init()
        self._spawn_processes()

    def tearDown(self):
        _check_and_unset_tcp_init()
        super().tearDown()
+
97
+
98
+ # This list contains test suites that are agent-agnostic and that only verify
99
+ # compliance with the generic RPC interface specification. These tests should
100
+ # *not* make use of implementation details of a specific agent (options,
101
+ # attributes, ...). These test suites will be instantiated multiple times, once
102
+ # for each agent (except the faulty agent, which is special).
103
# Agent-agnostic suites, instantiated once per (non-faulty) agent.
GENERIC_TESTS = [
    RpcTest,
    ParameterServerTest,
    DistAutogradTest,
    DistOptimizerTest,
    JitRpcTest,
    JitDistAutogradTest,
    RemoteModuleTest,
    ThreeWorkersRemoteModuleTest,
    DdpUnderDistAutogradTest,
    DdpComparisonTest,
    ReinforcementLearningRpcTest,
]
# Agent-agnostic suites that additionally require CUDA.
GENERIC_CUDA_TESTS = [
    CudaRpcTest,
    CudaDistAutogradTest,
    CudaRemoteModuleTest,
    CudaDdpComparisonTest,
]
122
+
123
+
124
+ # This list contains test suites that will only be run on the TensorPipeAgent.
125
+ # These suites should be standalone, and separate from the ones in the generic
126
+ # list (not subclasses of those!).
127
# TensorPipe-only suites (standalone; not subclasses of the generic ones).
TENSORPIPE_TESTS = [
    TensorPipeAgentRpcTest,
    TensorPipeAgentDistAutogradTest,
]
# TensorPipe-only suites that additionally require CUDA.
TENSORPIPE_CUDA_TESTS = [
    TensorPipeAgentCudaRpcTest,
    TensorPipeCudaDistAutogradTest,
]
135
+
136
+
137
+ # This list contains test suites that will only be run on the faulty RPC agent.
138
+ # That agent is special as it's only used to perform fault injection in order to
139
+ # verify the error handling behavior. Thus the faulty agent will only run the
140
+ # suites in this list, which were designed to test such behaviors, and not the
141
+ # ones in the generic list.
142
# Fault-injection suites, run only on the faulty agent.
FAULTY_AGENT_TESTS = [
    FaultyAgentRpcTest,
    FaultyAgentDistAutogradTest,
    JitFaultyAgentRpcTest,
]
147
+
148
+
149
def generate_tests(
    prefix: str,
    mixin: Type[RpcAgentTestFixture],
    tests: List[Type[RpcAgentTestFixture]],
    module_name: str,
) -> Dict[str, Type[RpcAgentTestFixture]]:
    """Mix in the classes needed to autogenerate the tests based on the params.

    Each entry of `tests` is a suite written against the abstract
    RpcAgentTestFixture; `mixin` is a concrete fixture subclass specializing
    it for one agent. For every suite this creates a new class combining
    suite + mixin + SpawnHelper, named `prefix` plus the suite name, and
    returns a dict from those names to the generated classes so the caller
    can insert them into its global namespace. `module_name` must be the
    calling module's name so the generated classes pickle correctly.
    """
    generated: Dict[str, Type[RpcAgentTestFixture]] = {}
    for suite in tests:
        if IS_SANDCASTLE and TEST_WITH_DEV_DBG_ASAN:
            print(
                f'Skipping test {suite} on sandcastle for the following reason: '
                'Skip dev-asan as torch + multiprocessing spawn have known issues', file=sys.stderr)
            continue

        combined_name = f"{prefix}{suite.__name__}"
        combined = type(combined_name, (suite, mixin, SpawnHelper), {})
        combined.__module__ = module_name
        generated[combined_name] = combined
    return generated
infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/generated/annotated_fn_args.py ADDED
The diff for this file is too large to render. See raw diff
 
infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/hop_db.py ADDED
@@ -0,0 +1,266 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: ignore-errors
2
+
3
+ import torch
4
+ import functools
5
+ from torch.testing import make_tensor
6
+ import unittest
7
+ from functorch.experimental.control_flow import map
8
+ from torch.testing._internal.opinfo.core import (
9
+ OpInfo,
10
+ SampleInput,
11
+ )
12
+ from torch.testing._internal.common_dtype import all_types_and, custom_types
13
+ from torch.testing._internal.opinfo.core import DecorateInfo
14
+ from torch.nn.attention.flex_attention import flex_attention, _create_empty_block_mask
15
+
16
+ def sample_inputs_map(opinfo, device, dtype, requires_grad, **kwargs):
17
+ make_arg = functools.partial(
18
+ make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
19
+ yield SampleInput([make_arg(2, 2, 2, low=0.1, high=2), make_arg(2, 2, 2, low=0.1, high=2)],
20
+ args=(make_arg(1, low=0.1, high=2), make_arg(1, low=0.1, high=2)))
21
+
22
def inner_f(x, y0, y1):
    """Shared elementwise body for the map test wrappers.

    Deliberately mixes out-of-place and in-place ops (``add_``, ``cos_``) to
    exercise functionalization inside ``map``.
    """
    first = x[0].cos().add_(1.) * y0
    second = (x[1] + y1.sin()).cos_().view(x[1].size())
    return [first, second]
24
+
25
def simple_map(xs, y0, y1):
    """Single-level ``map`` over ``inner_f``."""
    def body(x, a, b):
        return inner_f(x, a, b)

    return map(body, xs, y0, y1)
29
+
30
def nested_map(xs, y0, y1):
    """Two-level nested ``map`` bottoming out in ``inner_f``."""
    def outer(inner_xs, a, b):
        def leaf(x, a2, b2):
            return inner_f(x, a2, b2)

        return map(leaf, inner_xs, a, b)

    return map(outer, xs, y0, y1)
36
+
37
def triple_nested_map(xs, y0, y1):
    """Three-level nested ``map`` bottoming out in ``inner_f``."""
    def level0(xs0, a, b):
        def level1(xs1, a1, b1):
            def leaf(x, a2, b2):
                return inner_f(x, a2, b2)

            return map(leaf, xs1, a1, b1)

        return map(level1, xs0, a, b)

    return map(level0, xs, y0, y1)
45
+
46
+
47
# Higher-order ops exempt from OpInfo test coverage.
# Please consult with torch.export team before
# adding new entry to this list.
hop_that_doesnt_have_opinfo_test_allowlist = [
    "custom_function_call",
    "autograd_function_apply",
    "run_and_save_rng_state",
    "run_with_rng_state",
    "out_dtype",
    "trace_wrapped",
    "map",  # T183144629
    "map_impl",
    "with_effects",
    "strict_mode",
    "_export_tracepoint",
    "call_torchbind",
    "triton_kernel_wrapper_mutation",
    "triton_kernel_wrapper_functional",
    "hints_wrapper",
]
66
+
67
# Declare a custom op that mutates both of its inputs; used below to exercise
# auto_functionalize. The (a!)/(b!) schema annotations mark the mutated args.
torch.library.define(
    "testlib::mutating_custom_op",
    "(Tensor(a!) x, Tensor(b!) z) -> (Tensor, Tensor, Tensor)",
    tags=torch.Tag.pt2_compliant_tag,
)
72
+
73
+
74
@torch.library.impl("testlib::mutating_custom_op", "cpu")
def foo_impl_cpu(x, z):
    """CPU kernel: mutates both inputs in place, returns them plus their sum."""
    x += 5
    z += 5
    return x, z, x + z
79
+
80
+
81
@torch.library.impl("testlib::mutating_custom_op", "cuda")
def foo_impl_cuda(x, z):
    """CUDA kernel: mutates both inputs in place, returns them plus their sum."""
    x += 5
    z += 5
    return x, z, x + z
86
+
87
+
88
@torch.library.register_fake("testlib::mutating_custom_op")
def foo_impl_abstract(x, z):
    # Fake (meta) kernel: mutation is not modeled here; only the output
    # metadata (shapes/dtypes) matters for tracing.
    return x, z, x + z
91
+
92
+
93
+ def sample_inputs_cond(opinfo, device, dtype, requires_grad, **kwargs):
94
+ make_arg = functools.partial(
95
+ make_tensor, device=device, dtype=dtype, requires_grad=requires_grad
96
+ )
97
+ yield SampleInput(make_arg(2, 2, 2, low=0.1, high=2))
98
+
99
+
100
+ def simple_cond(x):
101
+ return torch.cond(x.sum() > 2, lambda x: (x.cos(),), lambda x: (x.sin(),), [x])
102
+
103
+
104
+ def sample_inputs_auto_functionalize(opinfo, device, dtype, requires_grad, **kwargs):
105
+ make_arg = functools.partial(
106
+ make_tensor, device=device, dtype=dtype, requires_grad=False
107
+ )
108
+ yield SampleInput(make_arg(2, 2, 2, low=0.1, high=2), make_arg(2, 2, 2, low=0.1, high=2))
109
+
110
+
111
def simple_auto_functionalize(x, z):
    """Invoke the mutating custom op declared above (both inputs are mutated
    in place; returns the two mutated tensors and their sum)."""
    return torch.ops.testlib.mutating_custom_op(x, z)
113
+
114
+
115
def sample_inputs_flex_attention(opinfo, device, dtype, requires_grad, **kwargs):
    """Yield one (q, k, v, score_mod, block_mask) sample for flex_attention."""
    make_arg = functools.partial(
        make_tensor, device=device, dtype=dtype, requires_grad=requires_grad
    )

    def score_mod(score, b, h, m, n):
        # Simple but non-trivial modifier: bias each score by its head index.
        return score + h

    # Shape (2, 2, 128, 8) — presumably (batch, heads, seq_len, head_dim);
    # TODO confirm against the flex_attention documentation.
    q, k, v = (make_arg(2, 2, 128, 8, low=0.1, high=2) for _ in range(3))
    block_mask = _create_empty_block_mask(q, k)
    yield SampleInput(
        q,
        k,
        v,
        score_mod,
        block_mask
    )
132
+
133
+ def sample_inputs_while_loop(opinfo, device, dtype, requires_grad, **kwargs):
134
+ make_arg = functools.partial(
135
+ make_tensor, device=device, dtype=dtype, requires_grad=False
136
+ )
137
+ yield SampleInput(
138
+ torch.tensor(3),
139
+ make_arg(2, 3, 4, low=0.1, high=2),
140
+ )
141
+
142
+ def simple_while_loop(iter_t, x):
143
+ def cond_fn(iter_t, x):
144
+ return iter_t > 0
145
+
146
+ def body_fn(iter_t, x):
147
+ return iter_t - 1, x.cos()
148
+
149
+ return torch._higher_order_ops.while_loop(cond_fn, body_fn, (iter_t, x))
150
+
151
+
152
# OpInfo entries for higher-order operators (HOPs). Batched-grad checks are
# disabled across the board because HOPs do not support vmap-style batching
# uniformly; autograd support is declared per-entry.
hop_db = [
    OpInfo(
        name="map",
        variant_test_name="simple",
        op=simple_map,
        sample_inputs_func=sample_inputs_map,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
        check_batched_grad=False,
        check_batched_gradgrad=False,
        check_batched_forward_grad=False,
        check_inplace_batched_forward_grad=False,
    ),
    OpInfo(
        name="map",
        variant_test_name="nested",
        op=nested_map,
        sample_inputs_func=sample_inputs_map,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
        check_batched_grad=False,
        check_batched_gradgrad=False,
        check_batched_forward_grad=False,
        check_inplace_batched_forward_grad=False,
    ),
    OpInfo(
        name="map",
        variant_test_name="triple_nested",
        op=triple_nested_map,
        sample_inputs_func=sample_inputs_map,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
        check_batched_grad=False,
        check_batched_gradgrad=False,
        check_batched_forward_grad=False,
        check_inplace_batched_forward_grad=False,
    ),
    OpInfo(
        name="cond",
        variant_test_name="simple",
        op=simple_cond,
        sample_inputs_func=sample_inputs_cond,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
        check_batched_grad=False,
        check_batched_gradgrad=False,
        check_batched_forward_grad=False,
        check_inplace_batched_forward_grad=False,
        supports_autograd=True,
        # "torch.compile with aot_autograd does not currently support double backward."
        supports_gradgrad=False,
    ),
    OpInfo(
        name="while_loop",
        variant_test_name="simple",
        op=simple_while_loop,
        sample_inputs_func=sample_inputs_while_loop,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
        check_batched_grad=False,
        check_batched_gradgrad=False,
        check_batched_forward_grad=False,
        check_inplace_batched_forward_grad=False,
        supports_autograd=False,
    ),
    OpInfo(
        name="auto_functionalize",
        variant_test_name="simple",
        op=simple_auto_functionalize,
        sample_inputs_func=sample_inputs_auto_functionalize,
        dtypes=all_types_and(torch.bool, torch.half),
        supports_out=False,
        check_batched_grad=False,
        check_batched_gradgrad=False,
        check_batched_forward_grad=False,
        check_inplace_batched_forward_grad=False,
        supports_autograd=False,
    ),
    OpInfo(
        name="flex_attention",
        variant_test_name="simple",
        op=flex_attention,
        sample_inputs_func=sample_inputs_flex_attention,
        dtypes=custom_types(torch.float16, torch.float32),
        supports_out=False,
        check_batched_grad=False,
        check_batched_gradgrad=False,
        check_batched_forward_grad=False,
        check_inplace_batched_forward_grad=False,
        skips=(
            DecorateInfo(unittest.expectedFailure, "TestHOP", "test_aot_export"),
            DecorateInfo(unittest.expectedFailure, "TestHOP", "test_pre_dispatch_export"),
            DecorateInfo(unittest.expectedFailure, "TestHOP", "test_serialize_export"),
            DecorateInfo(unittest.expectedFailure, "TestHOP", "test_retrace_export"),
        ),
    ),
    OpInfo(
        name="flex_attention_backward",
        variant_test_name="simple",
        op=flex_attention,
        sample_inputs_func=sample_inputs_flex_attention,
        dtypes=custom_types(torch.float16, torch.float32),
        supports_out=False,
        check_batched_grad=False,
        check_batched_gradgrad=False,
        check_batched_forward_grad=False,
        check_inplace_batched_forward_grad=False,
        skips=(
            DecorateInfo(unittest.expectedFailure, "TestHOP", "test_aot_export"),
            DecorateInfo(unittest.expectedFailure, "TestHOP", "test_pre_dispatch_export"),
            DecorateInfo(unittest.expectedFailure, "TestHOP", "test_serialize_export"),
            DecorateInfo(unittest.expectedFailure, "TestHOP", "test_retrace_export"),
        ),
    )
]
infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/jit_metaprogramming_utils.py ADDED
@@ -0,0 +1,722 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: ignore-errors
2
+
3
+ # Torch
4
+ from torch.jit.annotations import BroadcastingList2, BroadcastingList3 # noqa: F401
5
+ import torch.nn.functional as F
6
+ import torch
7
+ import torch.cuda
8
+ import torch.jit
9
+ import torch.jit._logging
10
+ import torch.jit.frontend
11
+ from torch.testing._internal.common_nn import module_tests, new_module_tests
12
+ from torch.testing._internal.common_utils import is_iterable_of_tensors, noncontiguous_like
13
+
14
+ import collections
15
+ from copy import deepcopy
16
+ from typing import Any, Dict, List, Union
17
+ import math # noqa: F401
18
+
19
+ # Testing utils
20
+ from torch import inf
21
+
22
# These test specs are authored against the float32 default dtype; fail fast
# at import time if something has changed it.
assert torch.get_default_dtype() == torch.float32

# Canonical tensor-dimension sizes used throughout the test spec tables below.
L = 20
M = 10
S = 5
27
+
28
+
29
def unpack_variables(args):
    """Recursively rebuild nested tuples; non-tuple values pass through unchanged."""
    if not isinstance(args, tuple):
        return args
    return tuple(unpack_variables(item) for item in args)
34
+
35
class dont_convert(tuple):
    # Marker subclass of tuple: create_input passes these through verbatim
    # instead of interpreting them as tensor shapes.
    pass

# Wrapper marking a tensor argument that must not require grad in create_input.
non_differentiable = collections.namedtuple('non_differentiable', ['tensor'])
39
+
40
def create_input(call_args, requires_grad=True, non_contiguous=False, call_kwargs=None, dtype=torch.float, device=None):
    """Materialize a test-spec argument tuple into concrete values.

    Tuples of ints are interpreted as shapes for random tensors; tensors are
    cloned (optionally made non-contiguous); callables are invoked with
    (dtype, device) and their result processed recursively; everything else
    passes through. Returns (args_tuple, kwargs_dict).
    """
    if not isinstance(call_args, tuple):
        call_args = (call_args,)

    def map_arg(arg):
        def maybe_non_contig(tensor):
            # Tensors with <2 elements cannot be made non-contiguous.
            if not non_contiguous or tensor.numel() < 2:
                return tensor.clone()

            return noncontiguous_like(tensor)

        def conjugate(tensor):
            return tensor.conj()

        if isinstance(arg, (torch.Size, dont_convert)):
            # Sizes and explicitly-marked tuples pass through untouched.
            return arg
        elif isinstance(arg, tuple) and len(arg) == 0:
            # Empty tuple denotes a 0-dim tensor.
            var = conjugate(torch.randn((), dtype=dtype, device=device))
            var.requires_grad = requires_grad
            return var
        elif isinstance(arg, tuple) and not isinstance(arg[0], torch.Tensor):
            # Tuple of ints: treat as a shape for a fresh random tensor.
            return conjugate(maybe_non_contig(torch.randn(*arg, dtype=dtype, device=device))).requires_grad_(requires_grad)
        # double check casting
        elif isinstance(arg, non_differentiable):
            # NOTE(review): both branches below are byte-identical; the
            # isinstance check appears vestigial — confirm before cleaning up.
            if isinstance(arg.tensor, torch.Tensor):
                return conjugate(maybe_non_contig(arg.tensor.to(device=device)))
            return conjugate(maybe_non_contig(arg.tensor.to(device=device)))
        elif isinstance(arg, torch.Tensor):
            if arg.is_complex() != dtype.is_complex:
                raise RuntimeError("User provided tensor is real for a test that runs with complex dtype, ",
                                   "which is not supported for now")
            # NOTE: We do clone() after detach() here because we need to be able to change size/storage of v afterwards
            v = conjugate(maybe_non_contig(arg)).detach().to(device=device).clone()
            # Only floating/complex tensors can require grad.
            v.requires_grad = requires_grad and (v.is_floating_point() or v.is_complex())
            return v
        elif callable(arg):
            return map_arg(arg(dtype=dtype, device=device))
        else:
            return arg
    args_out = tuple(map_arg(arg) for arg in call_args)
    kwargs_out = {k: map_arg(v) for k, v in call_kwargs.items()} if call_kwargs else {}
    return args_out, kwargs_out
82
+
83
+ # NB: JIT script tests for all nn functional interfaces, script mode does
84
+ # not support in_place operations yet, so no inplace operation tests added.
85
+ # removed all the deprecated functions
86
+ #
87
+ # (
88
+ # method name,
89
+ # input size/constructing fn,
90
+ # args (tuple represents shape of a tensor arg),
91
+ # test variant name(will be used at test name suffix,
92
+ # 'inplace' skips grad tests), // optional
93
+ # (True, nonfusible_nodes, fusible_nodes) for autodiff // optional
94
+ # fn to determine if test should be skipped, // optional
95
+ # fn mapping output to part that should be gradcheck'ed, // optional
96
+ # kwargs for function, // optional
97
+ # )
98
# Spec table consumed by the JIT nn.functional tests; the tuple layout is
# described in the comment block above.
nn_functional_tests = [
    ('conv1d', (S, S, S), ((S, S, S),)),
    ('conv2d', (S, S, S, S), ((S, S, S, S),)),
    ('conv3d', (S, S, S, S, S), ((S, S, S, S, S),)),
    ('conv_transpose1d', (S, S, S), ((S, S, S),)),
    ('conv_transpose2d', (S, S, S, S), ((S, S, S, S),)),
    ('conv_transpose3d', (S, S, S, S, S), ((S, S, S, S, S),)),
    ('conv_tbc', (S, S, S), ((S, S, S), (S,), 2)),
    ('avg_pool1d', (S, S, S), (3,)),
    ('avg_pool2d', (S, S, S, S), (3,), '', (True,)),
    ('avg_pool3d', (S, S, S, S, S), (3,)),
    ('fractional_max_pool2d', (S, S, S, S), (3, [2, 3],)),
    ('max_pool1d', (S, S, S), (2, 1)),
    ('max_pool1d', (S, S, S), (2, 1, 1, 1, False, True), 'with_indices'),
    ('max_pool2d', (S, S, S, S), (2, 1), '', (True, 'aten::max_pool2d_with_indices')),
    ('max_pool2d', (S, S, S, S), (2, 1, 1, 1, False, True), 'with_indices', (True, 'aten::max_pool2d_with_indices')),
    ('max_pool3d', (S, S, S, S, S), (2, 1)),
    ('max_unpool1d', torch.tensor([[[2., 4]]]), (torch.tensor([[[1, 3]]]), 2, 2, 0)),
    ('max_unpool2d', torch.tensor([[[[2., 4]]]]), (torch.tensor([[[[1, 3]]]]), 2, 2, 0)),
    ('max_unpool3d', torch.tensor([[[[[2., 4]]]]]), (torch.tensor([[[[[1, 3]]]]]), 2, 2, 0)),
    ('lp_pool1d', (S, S, S), (2., 3, 2,)),
    ('lp_pool2d', (S, S, S, S), (2., 3, 2,)),
    ('lp_pool3d', (S, S, S, S, S), (2., 3, 2,)),
    ('adaptive_max_pool1d', (S, S, S), (5,)),
    ('adaptive_max_pool2d', (S, S, S, S), ([5, 7],)),
    ('adaptive_max_pool3d', (S, S, S, S, S), ([3, 2, 2],)),
    ('adaptive_avg_pool1d', (S, S, S), (5,), '', (True,)),
    ('adaptive_avg_pool2d', (S, S, S, S), ([5, 7],), '', (True,)),
    ('adaptive_avg_pool3d', (S, S, S, S, S), ([3, 2, 2],), '', (True,)),
    ('dropout', (S, S, S), (0.5,), '', (True, 'aten::native_dropout')),
    ('alpha_dropout', (S, S, S), (0.5,)),
    ('dropout2d', (S, S, S), (0.5,)),
    ('dropout2d', (S, S, S, S), (0.5,), 'batched'),
    ('dropout3d', (S, S, S, S), (0.5,)),
    ('dropout3d', (S, S, S, S, S), (0.5,), 'batched'),
    ('feature_alpha_dropout', (S, S, S), (0.5,)),
    ('threshold', (S, S, S), (0.1, 2.), '', (True,)),
    ('threshold', (S, S, S), (0.1, 2., True), 'inplace'),
    ('relu', (S, S, S), (), '', (True,)),
    ('relu', (S, S, S), (), 'inplace'),
    ('glu', (S - 1, S - 1, S - 1), (),),
    ('hardtanh', (S, S, S), (-0.5, 0.5), '', (True,)),
    ('hardtanh', (S, S, S), (-0.5, 0.5, True), 'inplace'),
    ('relu6', (S, S, S), (), '', (True,)),
    ('relu6', (S, S, S), (True), 'inplace'),
    ('elu', (S, S, S), (0.9,),),
    ('elu', (S, S, S), (0.9, True), 'inplace'),
    ('selu', (S, S, S), (),),
    ('selu', (S, S, S), (True), 'inplace'),
    ('celu', (S, S, S), (0.9,),),
    ('celu', (S, S, S), (0.9, True), 'inplace'),
    ('leaky_relu', (S, S, S), (0.02,), '', (True,)),
    ('leaky_relu', (S, S, S), (0.02,), 'inplace'),
    ('rrelu', (S, S), (0.1, 0.3, False),),
    ('rrelu', (S, S), (0.1, 0.3, False, True), 'inplace'),
    ('hardshrink', (S, S, S), (0.4,), '', (True,)),
    ('tanhshrink', (S, S, S), (),),
    ('softsign', (S, S, S), (),),
    ('softplus', (S, S, S), (), '', (True,)),
    ('softmin', (S, S, S), (0,),),
    ('softmax', (S, S, S), (0,), '', (True,)),
    ('softmax', (S, S, S), (0, 3, torch.double), 'with_all_args', (True,)),
    ('tanh', (S, S, S), (), '', (True,)),
    ('sigmoid', (S, S, S), (), '', (True,)),
    ('silu', (S, S, S), (), '', (True,)),
    ('log_softmax', (S, S, S), (0,), '', (True,)),
    ('linear', (S, S), ((M, S),), '', (True, ['aten::linear'])),
    ('linear', (S, S), ((M, S), (M,)), 'addmm', (True, ['aten::linear'])),
    ('bilinear', (S, S, S), ((S, S, M), torch.zeros(M, S, M),),),
    ('embedding', torch.tensor([[1, 2, 4, 5], [4, 3, 2, 5]]), (torch.rand(6, 3), ), '', (True,)),
    ('embedding_bag', torch.tensor([1, 2, 4, 2]), (torch.rand(5, 3), torch.tensor([0, 4]),),),
    ('batch_norm', (S, S),
        (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), None, None, True, ),
        'training', (True, 'aten::_batch_norm_impl_index')),
    ('batch_norm', (0, S, S, S),
        (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)),
         non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), True, ),
        'size_zero', (True, 'aten::_batch_norm_impl_index')),
    ('batch_norm', (0, S, S, S),
        (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)),
         non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), True, ),
        'size_zero_inference', (True, 'aten::_batch_norm_impl_index')),
    ('batch_norm', (S, S),
        (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)),
         non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), True, ),
        'with_weight_and_bias_training', (True, 'aten::_batch_norm_impl_index')),
    ('batch_norm', (S, S), (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)),
                            None, non_differentiable(torch.ones(S)), True, ),
        'with_only_bias_training', (True, 'aten::_batch_norm_impl_index')),
    ('batch_norm', (S, S), (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)),
                            non_differentiable(torch.randn(S)), None, True, ),
        'with_only_weight_training', (True, 'aten::_batch_norm_impl_index')),
    ('batch_norm', (S, S), (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)),
                            None, None, False, ),
        'inference', (True, 'aten::_batch_norm_impl_index')),
    ('batch_norm', (S, S), (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)),
                            non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), False, ),
        'with_weight_and_bias_inference', (True, 'aten::_batch_norm_impl_index')),
    ('batch_norm', (S, S), (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)),
                            None, non_differentiable(torch.ones(S)), False, ),
        'with_only_bias_inference', (True, 'aten::_batch_norm_impl_index')),
    ('batch_norm', (S, S), (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)),
                            non_differentiable(torch.randn(S)), None, False, ),
        'with_only_weight_inference', (True, 'aten::_batch_norm_impl_index')),
    ('instance_norm', (S, S, S), (non_differentiable(torch.zeros(S)), non_differentiable(torch.ones(S))),),
    ('layer_norm', (S, S, S, S), ([5],), '',
        (False, ['aten::contiguous', 'aten::_batch_norm_impl_index'])),
    ('layer_norm', (S, S, S, S), ([5], non_differentiable(torch.rand(S)),), 'with_only_weight',
        (False, ['aten::contiguous', 'aten::_batch_norm_impl_index'])),
    ('layer_norm', (S, S, S, S), ([5], None, non_differentiable(torch.rand(S)),), 'with_only_bias',
        (False, ['aten::contiguous', 'aten::_batch_norm_impl_index'])),
    ('layer_norm', (S, S, S, S), ([5], non_differentiable(torch.rand(S)),
                                  non_differentiable(torch.rand(S))), 'with_weight_and_bias',
        (False, ['aten::contiguous', 'aten::_batch_norm_impl_index', 'aten::addcmul'])),
    ('group_norm', (S, S, S), (1, torch.rand(5),),),
    ('local_response_norm', (S, S, S), (2, ),),
    ('nll_loss', F.log_softmax(torch.randn(3, 5), dim=0), (torch.tensor([1, 0, 4]),), '',),
    ('poisson_nll_loss', torch.rand(S, 2), (torch.rand(S, 2),),),
    ('poisson_nll_loss', torch.rand(S, 2), (torch.rand(S, 2), True, True), 'full'),
    ('kl_div', F.log_softmax(torch.randn(S, 10), 1), (F.softmax(torch.randn(S, 10), 1),),),
    ('cross_entropy', (3, S), (torch.randint(S, (3,), dtype=torch.int64),),),
    ('binary_cross_entropy_with_logits', (3,), (torch.empty(3).random_(2), ),),
    ('smooth_l1_loss', (3, S), (non_differentiable(torch.rand(3, S)),),),
    ('huber_loss', (3, S), (non_differentiable(torch.rand(3, S)),),),
    ('l1_loss', (3, S), (non_differentiable(torch.rand(3, S)),),),
    ('mse_loss', (3, S), (non_differentiable(torch.rand(3, S)),),),
    ('smooth_l1_loss', (3, S), ((torch.rand(3, S)),), 'with_grad'),
    ('huber_loss', (3, S), ((torch.rand(3, S)),), 'with_grad'),
    ('l1_loss', (3, S), ((torch.rand(3, S)),), 'with_grad'),
    ('mse_loss', (3, S), ((torch.rand(3, S)),), 'with_grad'),
    ('margin_ranking_loss', (S,), ((S,), (S,)),),
    ('hinge_embedding_loss', (3, S), (non_differentiable(torch.rand(3, S)),),),
    ('soft_margin_loss', (3, S), (non_differentiable(torch.rand(3, S)),),),
    ('multilabel_soft_margin_loss', (3, S), (non_differentiable(torch.rand(3, S)),),),
    ('cosine_embedding_loss', (S, S), ((S, S), non_differentiable(torch.rand(S,))),),
    ('pixel_shuffle', (1, 9, 4, 4), (3,),),
    ('pixel_unshuffle', (1, 1, 12, 12), (3,),),
    ('affine_grid', (S, 2, 3), (torch.Size([S, 1, 7, 7]),),),
    ('pad', (3, 3, 4, 2), ([1, 1],),),
    ('pairwise_distance', (S, S), ((S, S),),),
    ('pdist', (S, S), (),),
    ('cosine_similarity', (S, S), ((S, S),),),
    ('triplet_margin_loss', (S, S), ((S, S), (S, S)),),
    ('normalize', (S, S, S), (),),
    ('unfold', (S, S, S, S), ([2, 3]),),
    ('fold', (1, 3 * 2 * 2, 12), ([4, 5], [2, 2]),),
    ('grid_sample', (S, S, S, S), (non_differentiable(torch.rand(S, S, S, 2)),),),
    ('gumbel_softmax', (S, S), (2.,), '', (True, ['aten::softmax', 'aten::add', 'aten::div'], ['aten::neg'])),
    ('gumbel_softmax', (S, S), (2., True,), 'hard', (True, ['aten::softmax', 'aten::add', 'aten::div'], ['aten::neg'])),
    ('multilabel_margin_loss', torch.tensor([[0.2, -0.2, 0.07]]), (torch.tensor([[0, 0, 1]]),),),
    ('multi_margin_loss', (S, S), (non_differentiable(torch.randint(S, (S, ), dtype=torch.int64)),
                                   1, 1., non_differentiable(torch.randn(S))),),
    ('binary_cross_entropy', torch.randn(3, 2).sigmoid(), (non_differentiable(torch.rand(3, 2)),
                                                           non_differentiable(torch.randn(3, 2))),),
    ('binary_cross_entropy', torch.randn(3, 2).sigmoid(),
        (non_differentiable(torch.rand(3, 2)),
         non_differentiable(torch.randn(3, 2)), None, None, 'mean'), 'size_average'),
    ('ctc_loss', torch.rand(S, S, S).log_softmax(2).detach().requires_grad_(),
        (torch.randint(1, S, (S, S), dtype=torch.long), torch.full((S,), S, dtype=torch.long),
         torch.randint(1, S, (S,), dtype=torch.long))),
    ('upsample', torch.randn(S, S, M, M), (None, 2.), 'with_scale'),
    ('upsample', torch.randn(S, S, M, M), (4,), 'with_size'),
    ('interpolate', torch.zeros(3, 3).view(1, 1, 3, 3), (2,), 'nearest_4d'),
    ('interpolate', torch.randn(S, S, M, M), (None, 2.), 'nearest_4d_with_scale'),
    ('interpolate', torch.randn(S, S, M, M), (4,), 'nearest_4d_with_size'),
    ('interpolate', torch.zeros(3, 3).view(1, 1, 3, 3), (2,), 'area_4d'),
    ('interpolate', torch.randn(S, S, M, M), (None, 2.), 'area_4d_with_scale'),
    ('interpolate', torch.randn(S, S, M, M), (4,), 'area_4d_with_size'),
    ('interpolate', torch.zeros(3, 3).view(1, 1, 3, 3), (2,), 'bilinear_4d'),
    ('interpolate', torch.randn(S, S, M, M), (None, 2.), 'bilinear_4d_with_scale'),
    ('interpolate', torch.randn(S, S, M, M), (4,), 'bilinear_4d_with_size'),
    ('interpolate', torch.zeros(3, 3).view(1, 1, 3, 3), (2,), 'bicubic_4d'),
    ('interpolate', torch.randn(S, S, M, M), (None, 2.), 'bicubic_4d_with_scale'),
    ('interpolate', torch.randn(S, S, M, M), (4,), 'bicubic_4d_with_size'),
    ('interpolate', torch.zeros(3, 3).view(1, 3, 3), (2,), 'nearest_3d'),
    ('interpolate', torch.randn(S, M, M), (None, 2.), 'nearest_3d_with_scale'),
    ('interpolate', torch.randn(S, M, M), (4,), 'nearest_3d_with_size'),
    ('interpolate', torch.zeros(3, 3).view(1, 3, 3), (2,), 'area_3d'),
    ('interpolate', torch.randn(S, M, M), (None, 2.), 'area_3d_with_scale'),
    ('interpolate', torch.randn(S, M, M), (4,), 'area_3d_with_size'),
    ('interpolate', torch.zeros(3, 3).view(1, 3, 3), (2,), 'linear_3d'),
    ('interpolate', torch.randn(S, M, M), (None, 2.), 'linear_3d_with_scale'),
    ('interpolate', torch.randn(S, M, M), (4,), 'linear_3d_with_size'),
    ('interpolate', torch.randn(S, M, M, M, M), (None, 2.), 'nearest_5d_with_scale'),
    ('interpolate', torch.randn(S, M, M, M, M), (4,), 'nearest_5d_with_size'),
    ('interpolate', torch.zeros(3, 3, 3).view(1, 1, 3, 3, 3), (2,), 'area_5d'),
    ('interpolate', torch.randn(S, M, M, M, M), (None, 2.), 'area_5d_with_scale'),
    ('interpolate', torch.randn(S, M, M, M, M), (4,), 'area_5d_with_size'),
    ('interpolate', torch.zeros(3, 3, 3).view(1, 1, 3, 3, 3), (2,), 'trilinear_5d'),
    ('interpolate', torch.randn(S, M, M, M, M), (None, 2.), 'trilinear_5d_with_scale'),
    ('interpolate', torch.randn(S, M, M, M, M), (4,), 'trilinear_5d_with_size'),
    ('interpolate', torch.zeros(3, 3).view(1, 1, 3, 3), (2, None, 'nearest', None, False),
        'nearest_4d_not_recompute_scale_factor'),
    ('interpolate', torch.randn(S, S, M, M), (4, None, 'nearest', None, False),
        'nearest_4d_with_size_not_recompute_scale_factor'),
    ('interpolate', torch.randn(S, S, M, M), (None, 2., 'bilinear', None, False),
        'bilinear_4d_with_scale_not_recompute_scale_factor'),
    ('interpolate', torch.randn(S, S, M, M), (4, None, 'bilinear', None, False),
        'bilinear_4d_with_size_not_recompute_scale_factor'),
    ('interpolate', torch.randn(S, S, M, M), (None, 2., 'bicubic', None, False),
        'bicubic_4d_with_scale_not_recompute_scale_factor'),
    ('interpolate', torch.randn(S, S, M, M), (4, None, 'bicubic', None, False),
        'bicubic_4d_with_size_not_recompute_scale_factor'),
    ('interpolate', torch.randn(S, M, M), (None, 2., 'nearest', None, False),
        'nearest_3d_with_scale_not_recompute_scale_factor'),
    ('interpolate', torch.randn(S, M, M), (4, None, 'nearest', None, False),
        'nearest_3d_with_size_not_recompute_scale_factor'),
    ('interpolate', torch.randn(S, M, M), (None, 2., 'linear', None, False),
        'linear_3d_with_scale_not_recompute_scale_factor'),
    ('interpolate', torch.randn(S, M, M), (4, None, 'linear', None, False),
        'linear_3d_with_size_not_recompute_scale_factor'),
    ('interpolate', torch.randn(S, M, M, M, M), (None, 2., 'nearest', None, False),
        'nearest_5d_with_scale_not_recompute_scale_factor'),
    ('interpolate', torch.randn(S, M, M, M, M), (4, None, 'nearest', None, False),
        'nearest_5d_with_size_not_recompute_scale_factor'),
    ('interpolate', torch.randn(S, M, M, M, M), (None, 2., 'trilinear', None, False),
        'trilinear_5d_with_scale_not_recompute_scale_factor'),
    ('interpolate', torch.randn(S, M, M, M, M), (4, None, 'trilinear', None, False),
        'trilinear_5d_with_size_not_recompute_scale_factor'),
]
318
+
319
# Template used to compile a one-off TorchScript function: the first slot is
# the comma-separated formal-parameter list, the second the call expression.
script_template = '''
def the_method({}):
    return {}
'''
323
+
324
def value_to_literal(value):
    """Render ``value`` as Python source text for a generated script."""
    if isinstance(value, str):
        # ascii() both quotes the string and escapes special characters.
        return ascii(value)
    if isinstance(value, torch.Tensor):
        return 'torch.' + str(value)
    return str(value)
332
+
333
def get_call(method_name, func_type, args, kwargs):
    """Build the source text of a call for the given function flavor.

    For ``'method'`` the first positional arg is used as the receiver;
    kwargs are rendered via ``value_to_literal``.
    """
    kw_parts = [f'{k}={value_to_literal(v)}' for k, v in kwargs.items()]
    self_arg = args[0]
    positional = args[1:] if func_type == 'method' else args

    pieces = ', '.join(positional)
    if positional and kw_parts:
        pieces += ', '
    pieces += ', '.join(kw_parts)

    if func_type in ('functional', 'function'):
        return f'torch.{method_name}({pieces})'
    if func_type == 'method':
        return f'{self_arg}.{method_name}({pieces})'
    if func_type == 'nn_functional':
        return f'torch.nn.functional.{method_name}({pieces})'
    raise TypeError('Unsupported function type')
353
+
354
def get_constant(x):
    """Map float infinities to their ``math`` module spellings; anything else
    passes through unchanged."""
    if x == inf:
        return 'math.inf'
    return '-math.inf' if x == -inf else x
360
+
361
def get_script_args(args):
    """Split call args into script formal params, live tensor inputs, and the
    actual-argument expressions used at the call site."""
    formals: List[str] = []
    tensors: List[Union[torch.Tensor, List[torch.Tensor]]] = []
    actuals: List[str] = []

    def _bind(value, annotation=''):
        # Tensor-like values become fresh formal parameters fed at call time.
        name = f'i{len(formals)}'
        formals.append(name + annotation)
        actuals.append(name)
        tensors.append(value)

    for arg in args:
        if isinstance(arg, torch.Tensor):
            _bind(arg)
        elif is_iterable_of_tensors(arg):
            _bind(list(arg), ': List[torch.Tensor]')
        elif isinstance(arg, str):
            actuals.append(f"'{arg}'")
        else:
            actuals.append(str(get_constant(arg)))
    return (formals, tensors, actuals)
381
+
382
+ # create a script function from (name, func_type, output_process_fn),
383
+ # and returns the compiled function and example inputs
384
def gen_script_fn_and_args(method_name, func_type, *args, **kwargs):
    """Compile a one-off script function that calls ``method_name`` and return
    (compiled_fn, tensor_inputs)."""
    formals, tensors, actuals = get_script_args(args)
    call = get_call(method_name, func_type, actuals, kwargs)
    # Non-tensor args are baked into the script source as literals.
    script = script_template.format(', '.join(formals), call)
    CU = torch.jit.CompilationUnit(script)
    return CU.the_method, tensors
390
+
391
+ # create a script function from (name, func_type),
392
+ # returns a function takes in (args, kwargs) and runs the compiled function
393
def create_script_fn(self, method_name, func_type):
    """Return a closure that, per call, freshly scripts ``method_name``,
    checks export/import round-tripping via ``self.assertExportImport``,
    runs the compiled function, and stashes its optimized graph."""
    # function returns tuple containing original output and
    # filtered output to be used in checking gradients
    def script_fn(*args, **kwargs):
        fn, tensors = gen_script_fn_and_args(method_name, func_type, *args, **kwargs)
        self.assertExportImport(fn.graph, tensors)
        output = fn(*tensors)
        # skip type annotate function attributes for now, see: https://github.com/python/mypy/issues/2087
        script_fn.last_graph = fn.graph_for(*tensors)  # type: ignore[attr-defined]
        return output
    return script_fn
404
+
405
class SplitInputs:
    """Partition (args, kwargs) into tensor and non-tensor pieces.

    Used by tracing helpers: tensors become trace inputs while non-tensors
    are baked into the traced function, and `nontensors_match` guards that
    a cached trace is replayed with the same non-tensor configuration.
    """
    all_tensors: List[Any]        # tensor args followed by tensor kwarg values
    tensor_args: List[Any]
    nontensor_args: List[Any]
    arg_types: List[str]          # 't'/'s' flag per positional arg
    tensor_kwargs: Dict[str, Any]
    kwarg_order: List[str]        # original kwargs insertion order
    nontensor_kwargs: Dict[str, Any]
    kwarg_types: Dict[str, Any]   # 't'/'s' flag per kwarg name

    @staticmethod
    def _is_tensor_input(arg):
        # a tensor or an iterable of tensors counts as a "tensor" input
        return isinstance(arg, torch.Tensor) or is_iterable_of_tensors(arg)

    def __init__(self, args, kwargs):
        self.arg_types = ['t' if self._is_tensor_input(arg) else 's' for arg in args]
        self.kwarg_types = {k: 't' if self._is_tensor_input(v) else 's' for k, v in kwargs.items()}
        self.tensor_args = [arg for arg in args if self._is_tensor_input(arg)]
        self.nontensor_args = [arg for arg in args if not self._is_tensor_input(arg)]
        self.tensor_kwargs = {k: v for k, v in kwargs.items() if self._is_tensor_input(v)}
        self.nontensor_kwargs = {k: v for k, v in kwargs.items() if not self._is_tensor_input(v)}
        self.all_tensors = [*self.tensor_args, *[v for k, v in self.tensor_kwargs.items()]]
        self.kwarg_order = [k for k, v in kwargs.items()]

    def nontensors_match(self, other: 'SplitInputs'):
        # True iff shapes of the call (types, ordering) and all non-tensor
        # values are identical to `other`
        if self.arg_types != other.arg_types:
            return False
        if self.kwarg_types != other.kwarg_types:
            return False
        if self.kwarg_order != other.kwarg_order:
            return False
        if self.nontensor_args != other.nontensor_args:
            return False
        if self.nontensor_kwargs != other.nontensor_kwargs:
            return False
        return True
441
+
442
# make a new function where all non-tensor arguments in 'args' have been partially
# applied, and all tensor arguments remain.
# used to trace functions when some arguments are not tensors
def partial_apply_nontensors(fn, args, kwargs):
    """Return (tensor-only wrapper of fn, SplitInputs describing the split)."""
    inputs = SplitInputs(args, kwargs)

    def new_fn(*tensors_):
        # Re-interleave tensors back into their original positions using the
        # 't'/'s' flags recorded by SplitInputs; `tensors` is consumed in order.
        tensors = iter(tensors_)
        full_args = [args[i] if s == 's' else next(tensors) for i, s in enumerate(inputs.arg_types)]
        full_kwargs = {k: kwargs[k] if s == 's' else next(tensors) for k, s in inputs.kwarg_types.items()}
        return fn(*full_args, **full_kwargs)

    return new_fn, inputs
455
+
456
# create a trace function from input fn
def create_traced_fn(self, fn, cache_traced_fn=False):
    """Wrap `fn` so each call traces it (or reuses a cached trace) and runs it.

    With cache_traced_fn=True the first call traces and later calls replay the
    cached trace, after verifying the non-tensor inputs have not changed.
    """
    def traced_fn(*inputs, **kwargs):
        # `check_trace` is set to False because check_trace is run with @no_grad
        # Also, `check_against_reference` already does all the checks
        # against python function
        fn_tensors, split_inputs = partial_apply_nontensors(fn, inputs, kwargs)
        if not cache_traced_fn or not hasattr(traced_fn, 'traced'):
            traced = torch.jit.trace(fn_tensors, split_inputs.all_tensors, check_trace=False)
            self.assertExportImport(traced.graph, split_inputs.all_tensors)
            output = traced(*split_inputs.all_tensors)
            if cache_traced_fn:
                traced_fn.traced = traced
                traced_fn.split_inputs = split_inputs
        else:
            # Guard to check that nontensor inputs are the same as during tracing
            self.assertTrue(traced_fn.split_inputs.nontensors_match(split_inputs))
            output = traced_fn.traced(*split_inputs.all_tensors)
            traced = traced_fn.traced
        # skip type annotate function attributes for now, see: https://github.com/python/mypy/issues/2087
        traced_fn.last_graph = traced.graph_for(*split_inputs.all_tensors)  # type: ignore[attr-defined]
        traced_fn.graph = traced.graph  # type: ignore[attr-defined]
        return output
    return traced_fn
480
+
481
# Test names known to be failing when run through the script frontend;
# generated test loops skip any entry in this set.
EXCLUDE_SCRIPT = {
    'test_norm_fro_default',
    'test_norm_fro_cpu',
    'test_norm_nuc',
    'test_norm_fro',
    'test_norm_nuc_batched',

    # aten op has additional cudnn argument
    'test_nn_unfold',

    # flaky test - TODO fix
    'test_nn_ctc_loss',

    # unknown builtin op
    'test_nn_fold',

    # jit doesn't support sparse tensors.
    'test_to_sparse',
    'test_to_sparse_dim',
}
502
+
503
# generates a script function and set of example inputs
# from a specified test in the format of nn_functional_tests
def get_nn_functional_compiled_fn_and_inputs(name, self_size, args, variant_name='', *extra_args):
    """Compile an nn.functional op invocation and return (script_fn, inputs).

    `name` is the torch.nn.functional op name; `self_size` is the shape of the
    first (self) tensor; `args` are the remaining example arguments.
    `variant_name` and `*extra_args` are accepted for signature compatibility
    with the nn_functional_tests tuple format but do not affect compilation.
    """
    # NOTE(review): the original body also built `test_name`, `no_grad`, and
    # deep copies of the unpacked tensors (`f_args_tensor`) that were never
    # used; those dead locals have been removed.
    self_variable = create_input((self_size,))[0][0]

    # need to record this because methods can change the size (e.g. unsqueeze)
    args_variable, _ = create_input(args)

    f_args_variable = (self_variable,) + args_variable
    # Disable emit hooks so compiling here does not re-trigger the
    # save/load round-trip checks installed by JitTestCase.
    with torch._jit_internal._disable_emit_hooks():
        script_fn, inputs = gen_script_fn_and_args(name, "nn_functional", *f_args_variable)
    return script_fn, inputs
527
+
528
+
529
# additional modules test
# TODO: delete this list once we make all nn_tests work
# Each dict follows the common module-test spec format: constructor args,
# input size for the first forward() argument, and optional extra args.
additional_module_tests = [
    {
        'module_name': 'Bilinear',
        'constructor_args': (S, S, M),
        'input_size': (S, S),
        'extra_args': ((S, S),)
    },
    {
        'module_name': 'RNNCell',
        'constructor_args': (S, S),
        'input_size': (S, S),
    },
    {
        'module_name': 'LSTMCell',
        'constructor_args': (S, S),
        'input_size': (S, S),
    },
    {
        'module_name': 'GRUCell',
        'constructor_args': (S, S),
        'input_size': (S, S),
    },
    {
        'module_name': 'MultiheadAttention',
        'constructor_args': (128, 8),
        'input_size': (10, 8, 128),
        'extra_args': (torch.randn(10, 8, 128), torch.randn(10, 8, 128)),
        'slowTest': True
    },
    {
        'module_name': 'Transformer',
        'constructor_args': (1, 1, 1, 1, 2),
        'input_size': (3, 1, 1),
        'extra_args': (torch.randn(1, 1, 1),),
        'slowTest': True
    }
]
568
+
569
# Module tests that cannot be compiled as ScriptModules; skipped by
# try_get_nn_module_compiled_mod_and_inputs below.
EXCLUDE_SCRIPT_MODULES = {
    'test_nn_AdaptiveAvgPool2d_tuple_none',
    'test_nn_AdaptiveAvgPool3d_tuple_none',
    'test_nn_AdaptiveMaxPool2d_tuple_none',
    'test_nn_AdaptiveMaxPool3d_tuple_none',

    # Doesn't use future division, so this is not supported
    'test_nn_CrossMapLRN2d',
    # Derivative for aten::_scaled_dot_product_flash_attention_backward is not implemented
    'test_nn_TransformerDecoderLayer_gelu_activation',
    'test_nn_TransformerDecoderLayer_relu_activation',
    'test_nn_TransformerEncoderLayer_gelu_activation',
    'test_nn_TransformerEncoderLayer_relu_activation',
    'test_nn_Transformer_multilayer_coder',
}

# Source template for a scripted forward(); filled with (formals, call expr)
# by create_script_module.
script_method_template = '''
def forward({}):
    return {}
'''
589
+
590
def create_script_module(self, nn_module, constructor_args, *args, **kwargs):
    """Return a factory that wraps `nn_module` in a ScriptModule and runs it.

    The returned `script_module(*args)` builds a ScriptModule whose defined
    forward simply delegates to `self.submodule` (an instance of `nn_module`),
    optionally export/import-checks it (when `self` is a test case), runs it
    once, and returns the module.
    """
    def script_module(*args, **kwargs):
        formals, tensors, actuals = get_script_args(args)

        method_args = ', '.join(['self'] + actuals)
        call_args_str = ', '.join(actuals)
        call = f"self.submodule({call_args_str})"
        script = script_method_template.format(method_args, call)

        submodule_constants = []
        if kwargs.get('is_constant'):
            # mark the submodule as a TorchScript constant attribute
            submodule_constants = ['submodule']

        # Create module to use the script method
        class TheModule(torch.jit.ScriptModule):
            __constants__ = submodule_constants

            def __init__(self) -> None:
                super().__init__()
                self.submodule = nn_module(*constructor_args)

        def make_module(script):
            module = TheModule()
            # check __repr__
            str(module)
            module.define(script)
            return module

        module = make_module(script)
        if self:
            self.assertExportImportModule(module, tensors)
            module(*args)
        # skip type annotate function attributes for now, see: https://github.com/python/mypy/issues/2087
        create_script_module.last_graph = module.graph  # type: ignore[attr-defined]
        return module
    return script_module
626
+
627
def check_alias_annotation(method_name, args, kwargs, *, aten_name, func_type='method'):
    """Compile a one-call script and verify `aten_name`'s alias annotation.

    Builds the same generated script as gen_script_fn_and_args, simplifies
    its IR, then asks the JIT to check the op's declared aliasing against
    observed behavior on `args`.
    """
    formals, tensors, actuals = get_script_args(args)
    call = get_call(method_name, func_type, actuals, kwargs)
    script = script_template.format(', '.join(formals), call)
    CU = torch.jit.CompilationUnit(script)
    # to clean up IR before the alias check
    torch._C._jit_pass_inline(CU.the_method.graph)
    torch._C._jit_pass_constant_propagation(CU.the_method.graph)
    torch._C._jit_check_alias_annotation(CU.the_method.graph, tuple(tensors), aten_name)
636
+
637
def get_nn_module_name_from_kwargs(**kwargs):
    """Resolve the nn-module name from a module-test spec dict.

    Precedence: explicit 'module_name', then 'fullname', then the name of a
    supplied 'constructor'. Returns None if none of these keys are present.
    """
    for key in ('module_name', 'fullname'):
        if key in kwargs:
            return kwargs[key]
    if 'constructor' in kwargs:
        return kwargs['constructor'].__name__
644
+
645
def get_nn_mod_test_name(**kwargs):
    """Build the canonical test id ('test_nn_<name>[_<desc>]') for a spec.

    A 'fullname' key wins outright; otherwise the module name is resolved
    and an optional 'desc' suffix is appended.
    """
    try:
        base = kwargs['fullname']
    except KeyError:
        base = get_nn_module_name_from_kwargs(**kwargs)
        if 'desc' in kwargs:
            base = f"{base}_{kwargs['desc']}"
    return f'test_nn_{base}'
653
+
654
def get_nn_module_class_from_kwargs(**kwargs):
    """Return the bare class name of a spec, stripping any '_desc' suffix."""
    name = get_nn_module_name_from_kwargs(**kwargs)
    separator = name.find("_")
    # No underscore -> the name is already the class name.
    return name if separator == -1 else name[:separator]
661
+
662
def try_get_nn_module_compiled_mod_and_inputs(*args, **kwargs):
    """Build a scripted module plus example inputs from a module-test spec.

    Returns (scripted_module, inputs) or None when the spec is excluded,
    uses eval-mode descriptions, or wraps a functional module.
    """
    name = get_nn_module_name_from_kwargs(**kwargs)

    if 'desc' in kwargs and 'eval' in kwargs['desc']:
        # eval() is not supported, so skip these tests
        return

    test_name = name
    if 'desc' in kwargs:
        test_name = f"{test_name}_{kwargs['desc']}"
    # NOTE(review): test_name above is immediately overwritten here; the
    # canonical id comes from get_nn_mod_test_name.
    test_name = get_nn_mod_test_name(**kwargs)

    if test_name in EXCLUDE_SCRIPT_MODULES:
        return
    if 'constructor' in kwargs:
        nn_module = kwargs['constructor']
    else:
        nn_module = getattr(torch.nn, name)

    if "FunctionalModule" in str(nn_module):
        return

    if 'constructor_args_fn' in kwargs:
        constructor_args = kwargs['constructor_args_fn']()
    else:
        constructor_args = kwargs.get('constructor_args', ())

    # Set up inputs from tuple of sizes or constructor fn
    input_dtype = torch.double
    if 'input_fn' in kwargs:
        input = kwargs['input_fn']()
        if isinstance(input, torch.Tensor):
            input = (input,)

        if all(tensor.is_complex() for tensor in input):
            # complex inputs get a complex-double dtype for created tensors
            input_dtype = torch.cdouble
    else:
        input = (kwargs['input_size'],)

    # Extra parameters to forward()
    if 'extra_args' in kwargs:
        input = input + kwargs['extra_args']

    if 'target_size' in kwargs:
        input = input + (kwargs['target_size'],)
    elif 'target_fn' in kwargs:
        if torch.is_tensor(input):
            input = (input,)
        input = input + (kwargs['target_fn'](),)

    args_variable, kwargs_variable = create_input(input, dtype=input_dtype)
    f_args_variable = deepcopy(unpack_variables(args_variable))
    out_var = deepcopy(f_args_variable)

    # create_script_module(None, ...) skips the export/import assertion
    # (no test case available) but still compiles and runs the module once.
    args, mod = f_args_variable, create_script_module(None, nn_module, constructor_args, *f_args_variable)(*f_args_variable)

    return mod, out_var
719
+
720
+
721
def get_all_nn_module_tests():
    """Aggregate every nn-module test spec known to this file into one list."""
    all_specs = []
    for suite in (module_tests, new_module_tests, additional_module_tests):
        all_specs += suite
    return all_specs
infer_4_47_1/lib/python3.10/site-packages/torch/testing/_internal/jit_utils.py ADDED
@@ -0,0 +1,893 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: ignore-errors
2
+
3
+ # Torch
4
+ from torch.autograd import Variable
5
+ from torch.autograd.function import _nested_map
6
+ from torch.jit.annotations import BroadcastingList2, BroadcastingList3 # noqa: F401
7
+
8
+ from torch.onnx import OperatorExportTypes
9
+ import torch
10
+ import torch.cuda
11
+ import torch.jit
12
+ import torch.jit._logging
13
+ import torch.jit.frontend
14
+ import torch.jit.quantized
15
+ import zipfile
16
+ import functools
17
+
18
+ # Testing utils
19
+ from torch.testing import FileCheck
20
+ from torch.testing._internal.common_utils import IS_WINDOWS, \
21
+ freeze_rng_state, enable_profiling_mode_for_profiling_tests, ProfilingMode, TEST_BAILOUTS, \
22
+ is_iterable_of_tensors
23
+ from torch.testing._internal.common_jit import JitCommonTestCase
24
+ from torch.testing._internal.common_utils import enable_profiling_mode # noqa: F401
25
+
26
+ # Standard library
27
+ from contextlib import contextmanager
28
+ from functools import reduce
29
+ from io import StringIO
30
+ from collections import defaultdict
31
+
32
+ import importlib.util
33
+ import inspect
34
+ import io
35
+ import math
36
+ import os
37
+ import pickle
38
+ import sys
39
+ import tempfile
40
+ import textwrap
41
+ from importlib.abc import Loader
42
+ from typing import Any, Dict, List, Tuple, Union
43
+
44
# Capability flags for the current machine; tests consult these to decide
# whether CUDA / multi-GPU / half-precision variants can run.
RUN_CUDA = torch.cuda.is_available()
RUN_CUDA_MULTI_GPU = RUN_CUDA and torch.cuda.device_count() > 1
RUN_CUDA_HALF = RUN_CUDA
# HIP supports half, no version check necessary
if torch.cuda.is_available() and not torch.version.hip:
    CUDA_VERSION = torch._C._cuda_getCompiledVersion()
    for d in range(torch.cuda.device_count()):
        major = torch.cuda.get_device_capability(d)[0]
        if (major < 6):
            # any device below compute capability 6.x disables half for all
            RUN_CUDA_HALF = False
54
+
55
def execWrapper(code, glob, loc):
    # Thin wrapper around exec() so test helpers can pass globals/locals by
    # keyword; `code` is trusted test source generated by this file's helpers.
    exec(code, glob, loc)
57
+
58
def do_input_map(fn, input):
    """Apply `fn` to every tensor inside a nested structure of containers.

    Non-tensor leaves are handled by `_nested_map`'s own rules; the structure
    of `input` is preserved.
    """
    def is_tensor(obj):
        return isinstance(obj, torch.Tensor)

    mapper = _nested_map(is_tensor, fn)
    return mapper(input)
60
+
61
def clear_class_registry():
    """Reset TorchScript's global class/type state between tests.

    Clears the C++ class registry, recreates the concrete-type store, and
    wipes Python-side script class state so compiled classes from one test
    cannot leak into the next.
    """
    torch._C._jit_clear_class_registry()
    torch.jit._recursive.concrete_type_store = torch.jit._recursive.ConcreteTypeStore()
    torch.jit._state._clear_class_state()
65
+
66
def get_execution_plan(graph_executor_state):
    """Return the single execution plan held by a GraphExecutor debug state.

    Raises RuntimeError when the executor holds anything other than exactly
    one plan, since callers rely on that invariant.
    """
    plans = list(graph_executor_state.execution_plans.values())
    if len(plans) != 1:
        raise RuntimeError('This test assumes this GraphExecutor should '
                           f'only have one execution plan, got: {len(plans)}')
    return plans[0]
73
+
74
class _AssertRaisesRegexWithHighlightContext:
    """
    A context manager that is useful for checking that error messages highlight
    the correct part of the source code.
    """

    def __init__(self, test_case, exception, regex, highlight):
        # test_case: the unittest TestCase providing assertRaisesRegex
        # exception/regex: expected exception type and message pattern
        # highlight: source fragment that must be highlighted, or falsy to skip
        self.test_case = test_case
        self.exception_type = exception
        self.regex = regex
        self.highlight = highlight

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        # Re-raise the captured exception inside assertRaisesRegex so the
        # type/message checks run; additionally verify the highlight marker.
        with self.test_case.assertRaisesRegex(self.exception_type, self.regex):
            if type:
                raise value

        if self.highlight:
            FileCheck().check_source_highlighted(self.highlight).run(str(value))

        # Returning True suppresses the original exception (it was verified).
        return True
98
+
99
# Node kind produced by the TensorExpr fuser; used by the fusion assertions below.
FUSION_GROUP = "prim::TensorExprGroup"
100
+
101
+ class JitTestCase(JitCommonTestCase):
102
    # Enable CUDA memory-leak checking for every test in this case.
    _do_cuda_memory_leak_check = True
    # Set once the TracerWarning filters have been reinstalled (unittest
    # overrides warning filters; see setUp).
    _restored_warnings = False

    class capture_stdout(list):
        """
        Replace sys.stdout with a temporary StringIO; on exit, the captured
        text is appended to this list as its single element.
        """
        def __enter__(self):
            self.sys_stdout = sys.stdout
            self.stringio = StringIO()
            sys.stdout = self.stringio
            return self

        def __exit__(self, *args):
            self.append(str(self.stringio.getvalue()))
            del self.stringio
            sys.stdout = self.sys_stdout
119
+
120
    class capture_stderr(list):
        """
        Replace sys.stderr with a temporary StringIO; on exit, the captured
        text is appended to this list as its single element.
        """
        def __enter__(self):
            self.sys_stderr = sys.stderr
            self.stringio = StringIO()
            sys.stderr = self.stringio
            return self

        def __exit__(self, *args):
            self.append(str(self.stringio.getvalue()))
            del self.stringio
            sys.stderr = self.sys_stderr
134
+
135
    def setHooks(self):
        # Install emit hooks so every compiled module/function is round-tripped
        # through save/load (see emitModuleHook / emitFunctionHook).
        torch._C._jit_set_emit_hooks(self.emitModuleHook, self.emitFunctionHook)

    def clearHooks(self):
        # Remove the emit hooks installed by setHooks.
        torch._C._jit_set_emit_hooks(None, None)
140
+
141
    def setUp(self):
        """Install emit hooks and restore tracer-warning filters once."""
        super().setUp()
        # unittest overrides all warning filters and forces all of them to show up
        # after we install our own to silence those coming from inside PyTorch.
        # This will ensure that our filter still takes precedence.
        if not JitTestCase._restored_warnings:
            torch.jit.TracerWarning.ignore_lib_warnings()
            JitTestCase._restored_warnings = True
        self.setHooks()
150
+
151
    def tearDown(self):
        """Remove hooks and reset TorchScript class state after each test."""
        super().tearDown()
        # needs to be cleared because python might be unloaded before
        # the callback gets destructed
        self.clearHooks()
        clear_class_registry()
157
+
158
    def assertAllFused(self, graph, except_for=()):
        """Assert `graph` contains exactly one fusion group and nothing else.

        Nodes listed in `except_for` (plus standard bookkeeping nodes) are
        tolerated alongside the single FUSION_GROUP node.
        """

        # note this helper collects nodes on 'fast path' only
        # i.e. the true blocks of specialized checks
        def get_nodes_and_parents_recursively(block, kind, acc):
            for node in block.nodes():
                if node.kind() == kind:
                    acc[block].append(node)
                elif node.kind() == 'prim::DifferentiableGraph':
                    get_nodes_and_parents_recursively(node.g('Subgraph'), kind, acc)
                elif node.kind() == 'prim::If' and (node.inputs().__next__().node().kind() == 'aten::all' or
                                                    node.inputs().__next__().node().kind() == 'prim::TypeCheck' or
                                                    node.inputs().__next__().node().kind() == 'prim::RequiresGradCheck'):
                    # only descend into the first (true / specialized) block
                    get_nodes_and_parents_recursively(node.blocks().__next__(), kind, acc)
                else:
                    for inner_block in node.blocks():
                        get_nodes_and_parents_recursively(inner_block, kind, acc)

        allowed_nodes = {'prim::Constant', FUSION_GROUP, 'prim::BailoutTemplate',
                         'prim::TupleConstruct', 'prim::If', 'prim::TypeCheck', 'prim::RequiresGradCheck'} | set(except_for)

        fusion_groups : Dict[torch._C.Block, List[torch._C.Node]] = defaultdict(list)
        get_nodes_and_parents_recursively(graph, FUSION_GROUP, fusion_groups)
        self.assertTrue(len(fusion_groups) == 1, f'got {graph}')
        (graph, fusion_nodes) = next(iter(fusion_groups.items()))
        # the block contains one FUSION_GROUP and the rest of nodes are `allowed_nodes`
        self.assertTrue(len(fusion_nodes) == 1, f'got {graph}')
        self.assertTrue(all(node.kind() in allowed_nodes for node in graph.nodes()),
                        f'got {graph}')
187
+
188
+ def _isHookExceptionOk(self, e):
189
+ se = str(e)
190
+ allowed = ("Could not export Python function",
191
+ "closures are not exportable")
192
+ for a in allowed:
193
+ if a in se:
194
+ return True
195
+ return False
196
+
197
    def _compared_saved_loaded(self, m):
        """Round-trip `m` through save/load twice and compare emitted code.

        Verifies that re-saving an imported module produces identical Python
        source files inside the archive, and that IValue tags survive import.
        Exceptions from legitimately non-exportable functions are tolerated
        (see _isHookExceptionOk).
        """
        def extract_files(buffer):
            # crack open the zip format to get at the main module code
            archive = zipfile.ZipFile(buffer)
            # check that we have no duplicate names
            self.assertEqual(len(set(archive.namelist())), len(archive.namelist()))
            files = list(filter(lambda x: x.startswith('archive/code/'), archive.namelist()))
            # unwrap all the code files into strings
            code_files_str = filter(lambda x: x.endswith('.py'), files)
            code_files_stream = (archive.open(f) for f in code_files_str)
            code_files = ("".join([line.decode() for line in file]) for file in code_files_stream)

            # unpickle all the debug files
            debug_files_str = filter(lambda f: f.endswith('.debug_pkl'), files)
            debug_files_stream = (archive.open(f) for f in debug_files_str)
            debug_files = (pickle.load(f) for f in debug_files_stream)
            return code_files, debug_files

        # disable the hook while we parse code, otherwise we will re-enter the hook
        with torch._jit_internal._disable_emit_hooks():
            try:
                # short-circuit if this is an empty function or module
                if len(m.code) == 0:
                    return
                if isinstance(m, torch._C.ScriptModule):
                    if len(m._method_names()) == 0:
                        return

                # save the module to a buffer
                buffer = io.BytesIO()
                torch.jit.save(m, buffer)
                # copy the data in the buffer so we can restore it later. This
                # is because py2 and py3 have different semantics with zipfile
                # and it's easier to just work with a fresh copy each time.
                buffer_copy = buffer.getvalue()

                code_files, debug_files = extract_files(buffer)

            except RuntimeError as e:
                # tolerate non-exportable functions; re-raise anything else
                if not self._isHookExceptionOk(e):
                    raise
                else:
                    return

            # import the model again (from the copy we made of the original)
            buffer2 = io.BytesIO(buffer_copy)
            imported = torch.jit.load(buffer2)

            # save it again
            saved_module_buffer_2 = io.BytesIO()
            torch.jit.save(imported, saved_module_buffer_2)

            saved_module_buffer_2.seek(0)
            code_files_2, debug_files_2 = extract_files(saved_module_buffer_2)

            for a, b in zip(code_files, code_files_2):
                self.assertMultiLineEqual(a, b)

            if isinstance(m, torch._C.ScriptModule):
                self.assertTrue(torch._C._ivalue_tags_match(m, imported._c))
257
+
258
+
259
    def emitFunctionHook(self, func):
        """Emit hook: round-trip every compiled function through save/load."""
        # func has invalid names for export, skip the jitter check
        if func.name == "<lambda>" or "aten::" in func.name:
            return
        self._compared_saved_loaded(func)

    def emitModuleHook(self, module):
        """Emit hook: round-trip every compiled module through save/load."""
        self._compared_saved_loaded(module)
267
+
268
+
269
    def getExportImportCopyWithPacking(self, m, also_test_file=True, map_location=None):
        """Save/load `m` with _pack/_unpack applied around serialization.

        When `also_test_file` is True, additionally round-trips through a
        real file on disk (Windows-safe temp-file handling) and returns the
        file-based copy.
        """
        buffer = io.BytesIO()
        m.apply(lambda s: s._pack() if s._c._has_method('_pack') else None)
        torch.jit.save(m, buffer)
        # restore the original module's state after packing for the save
        m.apply(lambda s: s._unpack() if s._c._has_method('_unpack') else None)
        buffer.seek(0)
        imported = torch.jit.load(buffer, map_location=map_location)
        imported.apply(lambda s: s._unpack() if s._c._has_method('_unpack') else None)

        if not also_test_file:
            return imported

        # Ideally we would like to not have to manually delete the file, but NamedTemporaryFile
        # opens the file, and it cannot be opened multiple times in Windows. To support Windows,
        # close the file after creation and try to remove it manually
        f = tempfile.NamedTemporaryFile(delete=False)
        try:
            f.close()
            imported.save(f.name)
            result = torch.jit.load(f.name, map_location=map_location)
        finally:
            os.unlink(f.name)

        result.apply(lambda s: s._unpack() if s._c._has_method('_unpack') else None)
        return result
294
+
295
    def assertGraphContains(self, graph, kind, consider_subgraphs=False):
        """Assert at least one node of `kind` exists in `graph`.

        With consider_subgraphs=True a textual count is used so nodes inside
        subgraphs are included ('with <kind>' occurrences are excluded to
        avoid double-counting subgraph attributes).
        """

        if consider_subgraphs:
            strgraph = str(graph)
            count = strgraph.count(kind) - strgraph.count(f'with {kind}')
            self.assertTrue(count > 0)
            return

        def nodes(block):
            # recursively collect matching nodes from this block and sub-blocks
            out = []
            for node in block.nodes():
                if node.kind() == kind:
                    out.append(node)
                for block in node.blocks():
                    out += nodes(block)
            return out

        out_nodes = nodes(graph)
        self.assertTrue(len(out_nodes) > 0)
314
+
315
    def assertGraphContainsExactly(self, graph, kind, num_kind_nodes, consider_subgraphs=False):
        """Assert `graph` contains exactly `num_kind_nodes` nodes of `kind`.

        Same counting strategies as assertGraphContains, but with an exact
        expected count and a descriptive failure message.
        """
        def perform_assert(graph, kind, actual, expected, consider_subgraphs):
            if actual == expected:
                return
            subgraph = 'including' if consider_subgraphs else 'excluding'
            raise AssertionError(
                f'{graph}\nError: graph contains {actual} {kind} nodes ({subgraph} subgraphs) but expected {expected}')

        if consider_subgraphs:
            strgraph = str(graph)
            count = strgraph.count(kind) - strgraph.count(f'with {kind}')
            perform_assert(graph, kind, count, num_kind_nodes,
                           consider_subgraphs)
            return

        def nodes(block):
            # recursively collect matching nodes from this block and sub-blocks
            out = []
            for node in block.nodes():
                if node.kind() == kind:
                    out.append(node)
                for block in node.blocks():
                    out += nodes(block)
            return out

        out_nodes = nodes(graph)
        perform_assert(graph, kind, len(out_nodes), num_kind_nodes,
                       consider_subgraphs)
342
+
343
    def assertExpectedONNXGraph(self, g, *args, **kwargs):
        """Run the ONNX trace optimization pass, then compare to expect file."""
        g = torch.onnx._optimize_trace(g, operator_export_type=OperatorExportTypes.ONNX)
        self.assertExpectedGraph(g, *args, **kwargs)
346
+
347
    def assertExpectedGraph(self, trace, *args, **kwargs):
        """Canonicalize a graph (or trace's graph) and compare to expect file."""
        if isinstance(trace, torch._C.Graph):
            graph = trace
        else:
            graph = trace.graph()

        # lint before/after each transform to catch malformed IR early
        torch._C._jit_pass_lint(graph)
        torch._C._jit_pass_dce(graph)
        torch._C._jit_pass_lint(graph)
        graph = torch._C._jit_pass_canonicalize(graph)
        torch._C._jit_pass_lint(graph)
        self.assertExpected(str(graph), *args, **kwargs)
359
+
360
    def run_pass(self, name, trace):
        """Run JIT pass `_jit_pass_<name>` on a graph or trace and return it.

        Some passes return a replacement graph; bool/None results are ignored.
        When given a trace, the (possibly replaced) graph is written back.
        """
        if isinstance(trace, torch._C.Graph):
            graph = trace
            set_graph = False
        else:
            set_graph = True
            graph = trace.graph()

        torch._C._jit_pass_lint(graph)
        result = getattr(torch._C, '_jit_pass_' + name)(graph)
        # passes may return a new graph, a bool status, or nothing
        if result is not None and not isinstance(result, bool):
            graph = result
        torch._C._jit_pass_lint(graph)

        if set_graph:
            trace.set_graph(graph)
        return graph
377
+
378
    def get_frame_vars(self, frames_up):
        """Collect locals+globals from the caller `frames_up` levels above.

        Locals are inserted first and then overwritten by globals via
        dict.update, matching the historical behavior relied on by
        checkScript's exec environment.
        """
        frame = inspect.currentframe()
        if not frame:
            raise RuntimeError("failed to inspect frame")
        i = 0
        # walk up past this helper (+1) and then `frames_up` caller frames
        while i < frames_up + 1:
            frame = frame.f_back
            if not frame:
                raise RuntimeError("failed to get frame")
            i += 1
        defined_vars: Dict[str, Any] = {}
        defined_vars.update(frame.f_locals)
        defined_vars.update(frame.f_globals)
        return defined_vars
392
+
393
    def assertRaisesRegexWithHighlight(self, exception, regex, highlight):
        """Context manager asserting exception type/message AND source highlight."""
        return _AssertRaisesRegexWithHighlightContext(self, exception, regex, highlight)
395
+
396
    def checkScriptRaisesRegex(self, script, inputs, exception, regex,
                               name=None, outputs=None, capture_output=False,
                               frames_up=1, profiling=ProfilingMode.PROFILING):
        """
        Checks that a given function will throw the correct exception,
        when executed with normal python, the string frontend, and the
        AST frontend. Logic taken from `checkScript` (see comments there
        for details)
        """
        with enable_profiling_mode_for_profiling_tests():
            # Normal Python
            with self.assertRaisesRegex(exception, regex):
                if isinstance(script, str):
                    # exec the source in the caller's environment to obtain
                    # the plain-Python function under the given `name`
                    frame = self.get_frame_vars(frames_up)
                    the_locals: Dict[str, Any] = {}
                    execWrapper(script, glob=frame, loc=the_locals)
                    frame.update(the_locals)

                    python_fn = frame[name]
                else:
                    python_fn = script

                python_fn(*inputs)

            # String frontend
            with self.assertRaisesRegex(exception, regex):
                if isinstance(script, str):
                    cu = torch.jit.CompilationUnit(script, _frames_up=frames_up)
                    string_frontend = getattr(cu, name)
                else:
                    source = textwrap.dedent(inspect.getsource(script))
                    cu = torch.jit.CompilationUnit(source, _frames_up=frames_up)
                    string_frontend = getattr(cu, script.__name__)

                string_frontend(*inputs)

            # Python AST frontend (only meaningful for real function objects)
            if not isinstance(script, str):
                with self.assertRaisesRegex(exception, regex):
                    ge = torch.jit.script(python_fn)
                    ge(*inputs)
437
+
438
    def checkBailouts(self, model, inputs, expected):
        """Force each bailout path of `model` in turn and check its output."""
        state = model.get_debug_state()
        plan = get_execution_plan(state)
        num_bailouts = plan.code.num_bailouts()
        for i in range(0, num_bailouts):
            # request_bailout forces the i-th guarded deoptimization path
            plan.code.request_bailout(i)
            bailout_outputs = model(*inputs)
            self.assertEqual(bailout_outputs, expected)
446
+
447
    def checkScript(self,
                    script,
                    inputs,
                    name='func',
                    optimize=True,
                    inputs_requires_grad=False,
                    capture_output=False,
                    frames_up=1,
                    profiling=ProfilingMode.PROFILING,
                    atol=None,
                    rtol=None):
        """
        Checks that a given script generates the same output as the Python
        version using the given inputs.

        `script` may be a source string (compiled via CompilationUnit) or a
        Python function (in which case the source-string frontend is checked
        first via recursion, then torch.jit.script). Returns the scripted fn.
        """
        with torch.jit.optimized_execution(optimize):
            with enable_profiling_mode_for_profiling_tests():
                # grad-requiring inputs need an extra profiling run below
                extra_profile_runs = any(isinstance(x, torch.Tensor) and x.requires_grad for x in inputs)
                if isinstance(script, str):
                    # Compile the string to a Script function
                    # with enable_profiling_mode():
                    cu = torch.jit.CompilationUnit(script, _frames_up=frames_up)

                    # Execute the Python function so we can run it later and get its
                    # outputs

                    frame = self.get_frame_vars(frames_up)
                    the_locals: Dict[str, Any] = {}
                    execWrapper(script, glob=frame, loc=the_locals)
                    frame.update(the_locals)

                    python_fn = frame[name]
                    scripted_fn = getattr(cu, name)
                else:

                    # Check the string frontend first
                    source = textwrap.dedent(inspect.getsource(script))
                    self.checkScript(
                        source,
                        inputs,
                        script.__name__,
                        optimize=optimize,
                        inputs_requires_grad=inputs_requires_grad,
                        capture_output=capture_output,
                        profiling=profiling,
                        frames_up=2)

                    # Continue checking the Python frontend
                    scripted_fn = torch.jit.script(script, _frames_up=1)
                    python_fn = script

                if inputs_requires_grad:
                    # detach so the scripted runs record their own autograd graph
                    recording_inputs = do_input_map(lambda t: t.detach().requires_grad_(), inputs)
                else:
                    recording_inputs = inputs

                if capture_output:
                    with self.capture_stdout() as script_stdout:
                        script_outputs = scripted_fn(*recording_inputs)
                    with self.capture_stdout() as opt_script_stdout:
                        opt_script_outputs = scripted_fn(*recording_inputs)
                    with self.capture_stdout() as _python_stdout:
                        python_outputs = python_fn(*inputs)
                    if not IS_WINDOWS:
                        self.assertExpected(script_stdout[0], subname='stdout')
                    self.assertEqual(python_outputs, opt_script_outputs, atol=atol, rtol=rtol)
                else:
                    # profiling run
                    script_outputs = scripted_fn(*recording_inputs)
                    if inputs_requires_grad or extra_profile_runs:
                        opt_script_outputs = scripted_fn(*recording_inputs)
                    # optimized run
                    opt_script_outputs = scripted_fn(*recording_inputs)
                    if TEST_BAILOUTS:
                        self.checkBailouts(scripted_fn, inputs, opt_script_outputs)
                    python_outputs = python_fn(*inputs)
                    self.assertEqual(python_outputs, script_outputs, atol=atol, rtol=rtol)
                    self.assertEqual(script_outputs, opt_script_outputs, atol=atol, rtol=rtol)
                return scripted_fn
526
+
527
+ def checkTrace(self, func, reference_tensors, input_tensors=None,
528
+ drop=None, allow_unused=False, verbose=False,
529
+ inputs_require_grads=True, check_tolerance=1e-5, export_import=True,
530
+ _force_outplace=False, grad_atol=None, grad_rtol=None):
531
+
532
+ # TODO: check gradients for parameters, not just inputs
533
+ def allSum(vs):
534
+ # drop allows us to remove some values from ever being used
535
+ # to test unused outputs
536
+ if drop is not None:
537
+ vs = vs[:-drop]
538
+ # we don't want all the grad for all the outputs to be the same
539
+ # so we multiply each by a constant
540
+ return sum(math.log(i + 2) * v.sum() for i, v in enumerate(vs) if v is not None)
541
+ if input_tensors is None:
542
+ input_tensors = reference_tensors
543
+
544
+ def flatten_inputs(inputs):
545
+ def input_reduce(input, fn, acc):
546
+ if isinstance(input, torch.Tensor):
547
+ fn(input, acc)
548
+ elif isinstance(input, dict):
549
+ reduce(lambda acc, key: input_reduce(input[key], fn, acc), input, acc)
550
+ else:
551
+ reduce(lambda acc, val: input_reduce(val, fn, acc), input, acc)
552
+ return acc
553
+ return tuple(input_reduce(recording_inputs, lambda t, acc: acc.append(t), []))
554
+
555
+ nograd_inputs = reference_tensors
556
+ if inputs_require_grads:
557
+ recording_inputs = do_input_map(lambda t: t.clone().requires_grad_(), reference_tensors)
558
+ flattened_recording_inputs = flatten_inputs(recording_inputs)
559
+ else:
560
+ recording_inputs = reference_tensors
561
+
562
+ # `check_trace` is set to False because check_trace is run with @no_grad
563
+ # Also, `checkTrace` already does all the checks
564
+ # against python function
565
+ ge = torch.jit.trace(func, input_tensors, check_tolerance=check_tolerance,
566
+ _force_outplace=_force_outplace, check_trace=False)
567
+
568
+ if export_import:
569
+ ge = self.getExportImportCopy(ge)
570
+
571
+ if verbose:
572
+ print(ge.graph)
573
+
574
+ # test no gradients case
575
+ outputs = func(*nograd_inputs)
576
+ outputs_ge = ge(*nograd_inputs)
577
+ self.assertEqual(outputs, outputs_ge)
578
+
579
+ # test gradients case
580
+ outputs = func(*recording_inputs)
581
+ if inputs_require_grads:
582
+ grads = torch.autograd.grad(allSum(outputs), flattened_recording_inputs,
583
+ allow_unused=allow_unused)
584
+
585
+ outputs_ge = ge(*recording_inputs)
586
+ if inputs_require_grads:
587
+ grads_ge = torch.autograd.grad(allSum(outputs_ge), flattened_recording_inputs,
588
+ allow_unused=allow_unused)
589
+ self.assertEqual(outputs, outputs_ge)
590
+ if inputs_require_grads:
591
+ self.assertEqual(grads, grads_ge, atol=grad_atol, rtol=grad_rtol)
592
+
593
+ # test the grad grad case
594
+ outputs = func(*recording_inputs)
595
+ l1 = allSum(outputs)
596
+ if inputs_require_grads:
597
+ grads = torch.autograd.grad(l1, flattened_recording_inputs, create_graph=True,
598
+ allow_unused=allow_unused)
599
+ if inputs_require_grads:
600
+ l2 = (allSum(grads) * l1)
601
+ grads2 = torch.autograd.grad(l2, flattened_recording_inputs, allow_unused=allow_unused)
602
+
603
+ if inputs_require_grads:
604
+ recording_inputs = do_input_map(lambda t: Variable(t, requires_grad=True), reference_tensors)
605
+ flattened_recording_inputs = flatten_inputs(recording_inputs)
606
+
607
+ outputs_ge = ge(*recording_inputs)
608
+ l1_ge = allSum(outputs_ge)
609
+ if inputs_require_grads:
610
+ grads_ge = torch.autograd.grad(
611
+ l1_ge, flattened_recording_inputs, create_graph=True, allow_unused=allow_unused)
612
+
613
+ if inputs_require_grads:
614
+ l2_ge = (allSum(grads_ge) * l1_ge)
615
+ grads2_ge = torch.autograd.grad(l2_ge, flattened_recording_inputs, allow_unused=allow_unused)
616
+
617
+ self.assertEqual(outputs, outputs_ge)
618
+ if inputs_require_grads:
619
+ self.assertEqual(grads, grads_ge, atol=grad_atol, rtol=grad_rtol)
620
+ for g2, g2_ge in zip(grads2, grads2_ge):
621
+ if g2 is None and g2_ge is None:
622
+ continue
623
+ self.assertEqual(g2, g2_ge, atol=8e-4, rtol=8e-4)
624
+
625
+ return ge
626
+
627
+ def checkModule(self, nn_module, args):
628
+ """
629
+ Check that a nn.Module's results in Script mode match eager and that it
630
+ can be exported
631
+ """
632
+ sm = torch.jit.script(nn_module)
633
+
634
+ with freeze_rng_state():
635
+ eager_out = nn_module(*args)
636
+
637
+ with freeze_rng_state():
638
+ script_out = sm(*args)
639
+
640
+ self.assertEqual(eager_out, script_out)
641
+ self.assertExportImportModule(sm, args)
642
+
643
+ return sm
644
+
645
class NoTracerWarnContextManager:
    """Temporarily silence the JIT tracer-state warning.

    On entry the current warn flag is saved and the warning is switched
    off; on exit the saved flag is restored.
    """

    def __enter__(self):
        # Stash the current flag so __exit__ can put it back.
        self.prev = torch._C._jit_get_tracer_state_warn()
        torch._C._jit_set_tracer_state_warn(False)

    def __exit__(self, *args):
        torch._C._jit_set_tracer_state_warn(self.prev)
@contextmanager
def inline_everything_mode(should_inline):
    """Force the JIT inline-everything mode to *should_inline* inside the block.

    The previous mode is restored on exit, even if the body raises.
    """
    saved = torch._C._jit_get_inline_everything_mode()
    torch._C._jit_set_inline_everything_mode(should_inline)
    try:
        yield
    finally:
        torch._C._jit_set_inline_everything_mode(saved)
@contextmanager
def set_fusion_group_inlining(inlining):
    """Set the fusion-group inlining debug flag inside the block.

    The previous flag value is restored on exit, even if the body raises.
    """
    previous = torch._C._debug_get_fusion_group_inlining()
    torch._C._debug_set_fusion_group_inlining(inlining)
    try:
        yield
    finally:
        torch._C._debug_set_fusion_group_inlining(previous)
# note: not re-entrant, use unnested only
@contextmanager
def disable_autodiff_subgraph_inlining(enabled=True):
    """Disable autodiff subgraph inlining for the duration of the block.

    NOTE: not re-entrant.  On exit inlining is unconditionally re-enabled
    (torch._C exposes no getter to restore the prior value), so do not nest.
    """
    torch._C._debug_set_autodiff_subgraph_inlining(not enabled)
    try:
        yield
    finally:
        torch._C._debug_set_autodiff_subgraph_inlining(True)
def _inline_everything(fn):
    """Decorator: run *fn* with the inline-everything mode switched on."""
    @functools.wraps(fn)
    def inner(*args, **kwargs):
        with inline_everything_mode(True):
            fn(*args, **kwargs)
    return inner
# this exists for forward compatibility reasons temporarily.
# TODO(suo) remove
def _tmp_donotuse_dont_inline_everything(fn):
    """Decorator: run *fn* with the inline-everything mode switched off."""
    @functools.wraps(fn)
    def inner(*args, **kwargs):
        with inline_everything_mode(False):
            fn(*args, **kwargs)
    return inner
+ # make it easy to quicky define/trace a function for these tests
697
+ def _trace(*args, **kwargs):
698
+ def wrapper(func):
699
+ return torch.jit.trace(func, args, **kwargs)
700
+ return wrapper
701
+
702
+
703
def enable_cpu_fuser(fn):
    """Decorator: run *fn* with the CPU fusers enabled.

    Enables both the legacy CPU fuser and general CPU fusion, and relaxes
    the LLVM requirement for the tensor-expression fuser, for the duration
    of the call.  The flags are reset afterwards even if *fn* raises.

    NOTE(review): the finally-block restores hard-coded defaults rather
    than the values saved on entry, matching the original behavior.
    """
    @functools.wraps(fn)  # keep the wrapped test's name/metadata (consistent with _inline_everything)
    def wrapper(*args, **kwargs):
        torch._C._jit_override_can_fuse_on_cpu_legacy(True)
        torch._C._jit_override_can_fuse_on_cpu(True)
        torch._C._jit_set_te_must_use_llvm_cpu(False)
        try:
            # propagate fn's result instead of discarding it
            return fn(*args, **kwargs)
        finally:
            torch._C._jit_override_can_fuse_on_cpu_legacy(False)
            torch._C._jit_override_can_fuse_on_cpu(False)
            torch._C._jit_set_te_must_use_llvm_cpu(True)
    return wrapper
def enable_cpu_fuser_if(cond):
    """Return :func:`enable_cpu_fuser` when *cond* is true, otherwise a
    pass-through decorator.

    The no-op branch now applies ``functools.wraps`` so both branches
    present the same decorator interface (name/metadata preserved).
    """
    if cond:
        return enable_cpu_fuser

    def noop_fuser(fn):
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            return fn(*args, **kwargs)
        return wrapper
    return noop_fuser
def get_forward(c):
    """Return the compiled ``forward`` method of C++ script module *c*."""
    return c._get_method('forward')
def get_forward_graph(c):
    """Return the IR graph of the ``forward`` method of C++ script module *c*."""
    forward = c._get_method('forward')
    return forward.graph
def get_module_method(m, module, method):
    """Fetch the compiled *method* from submodule *module* of script module *m*."""
    submodule = m._c.getattr(module)
    return submodule._get_method(method)
def attrs_with_prefix(module, prefix):
    """Return the names of *module*'s C++ submodule entries starting with *prefix*."""
    return [name for name, _ in module._modules._c.items()
            if name.startswith(prefix)]
def warmup_backward(f, *args):
    """Run backward three times so the profiling executor can warm up.

    With *args*: compute grads of *f* w.r.t. *args* on each iteration and
    collect them.  Without: call ``f.backward(retain_graph=True)``
    repeatedly (accumulating into ``.grad``) and return an empty list.
    """
    results = []
    for _ in range(3):  # profiling count
        if args:
            results.append(torch.autograd.grad(f, *args))
        else:
            f.backward(retain_graph=True)

    return results
# TODO: Remove me once https://bugs.python.org/issue42666 is resolved
def make_global(*args):
    """Install each argument into its defining module's global namespace
    under its own ``__name__`` (workaround for bpo-42666)."""
    for obj in args:
        setattr(sys.modules[obj.__module__], obj.__name__, obj)
+ # Helper function to eval Python3 code without causing a syntax error for
758
+ # this file under py2
759
+ def _get_py3_code(code, fn_name):
760
+ with tempfile.TemporaryDirectory() as tmp_dir:
761
+ script_path = os.path.join(tmp_dir, 'script.py')
762
+ with open(script_path, 'w') as f:
763
+ f.write(code)
764
+ spec = importlib.util.spec_from_file_location(fn_name, script_path)
765
+ module = importlib.util.module_from_spec(spec)
766
+ loader = spec.loader
767
+ assert isinstance(loader, Loader) # Assert type to meet MyPy requirement
768
+ loader.exec_module(module)
769
+ fn = getattr(module, fn_name)
770
+ return fn
771
+
772
class TensorExprTestOptions:
    """Snapshot-and-flip of the JIT global flags used by tensor-expression
    fuser tests.

    ``__init__`` enables the profiling executor and graph-executor
    optimization, turns on CPU/GPU fusion and the texpr fuser, disables
    fusion-group inlining, and relaxes the LLVM-only requirement — saving
    every previous value.  ``restore()`` puts all flags back.
    """

    def __init__(self) -> None:
        # NB: these two setters return the previous value, so a single call
        # both flips the flag and records the old state.
        self.old_profiling_executor = torch._C._jit_set_profiling_executor(True)
        self.old_profiling_mode = torch._C._get_graph_executor_optimize(True)

        self.old_cpu_fuser_state = torch._C._jit_can_fuse_on_cpu()
        self.old_gpu_fuser_state = torch._C._jit_can_fuse_on_gpu()
        torch._C._jit_override_can_fuse_on_cpu(True)
        torch._C._jit_override_can_fuse_on_gpu(True)
        self.texpr_fuser_state = torch._C._jit_texpr_fuser_enabled()
        torch._C._jit_set_texpr_fuser_enabled(True)
        self.old_fusion_inlining = torch._C._debug_get_fusion_group_inlining()
        torch._C._debug_set_fusion_group_inlining(False)
        self.old_te_must_use_llvm_cpu = torch._C._jit_get_te_must_use_llvm_cpu()
        torch._C._jit_set_te_must_use_llvm_cpu(False)

    def restore(self):
        """Restore every flag saved by ``__init__``."""
        torch._C._jit_set_profiling_executor(self.old_profiling_executor)
        torch._C._get_graph_executor_optimize(self.old_profiling_mode)

        torch._C._jit_set_texpr_fuser_enabled(self.texpr_fuser_state)
        torch._C._jit_override_can_fuse_on_gpu(self.old_gpu_fuser_state)
        torch._C._jit_override_can_fuse_on_cpu(self.old_cpu_fuser_state)
        torch._C._debug_set_fusion_group_inlining(self.old_fusion_inlining)
        torch._C._jit_set_te_must_use_llvm_cpu(self.old_te_must_use_llvm_cpu)
+ def clone_inputs(args):
799
+ inputs: List[Union[torch.Tensor, List[torch.Tensor]]] = []
800
+
801
+ for arg in args:
802
+ if isinstance(arg, torch.Tensor):
803
+ inputs.append(arg.detach().clone())
804
+ elif is_iterable_of_tensors(arg):
805
+ inputs.append([t.detach().clone() for t in arg])
806
+ else:
807
+ inputs.append(arg)
808
+
809
+ return inputs
810
+
811
def get_traced_sample_variant_pairs(device, dtype, op):
    """Collect (variant, sample) pairs for tracing tests of an OpInfo *op*.

    Pairs the op's function/method variants with each sample input from
    ``op.sample_inputs``, skipping lambda variants (untraceable) and
    converting bool args to int for ops whose JIT signature only accepts
    Scalars.  Returns an empty list for "fake function" ops (resize_,
    resize_as_), which don't support tracing.
    """
    # tuples of (variant, sample)
    outputs: List[Tuple[Any, Any]] = []

    samples = op.sample_inputs(device, dtype)

    # Acquires variants to test
    func = op.get_op()
    method = op.get_method()
    variants = {
        # TODO: inplace tests currently fail, fix and add inplace variant
        'function': func, 'method': method,
    }

    # TODO: find better way to standardize on op registration itself..
    has_fake_function = op.name in ["resize_", 'resize_as_']

    if has_fake_function:
        variants = {'method': getattr(torch.Tensor, op.name)}

    # In eager mode, these ops can take (Tensor, bool) args; but in
    # JIT they can only take (Tensor, Scalar), and bool is not a
    # scalar in the JIT type system. So to test these in JIT, the bool
    # is converted to an int for the test.
    ops_with_unsupported_bool_args = [
        {
            "name": "div_floor_rounding",
            "arg_idx": [0],
        },
        {
            "name": "div_no_rounding_mode",
            "arg_idx": [0],
        },
        {
            "name": "div_trunc_rounding",
            "arg_idx": [0],
        },
        {
            "name": "index_fill",
            "arg_idx": [2],
        },
        {
            "name": "full_like",
            "arg_idx": [0],
        },
        {
            "name": "mul",
            "arg_idx": [0],
        },
        {
            "name": "new_full",
            "arg_idx": [1],
        },
    ]

    # doesn't support tracing
    if has_fake_function:
        return outputs

    for sample in samples:
        for variant in variants.values():
            if variant is None:
                continue

            if is_lambda(variant):
                continue

            # NOTE(review): mutates sample.args in place for matching ops;
            # presumably samples are not shared across calls — verify if reused.
            matching_ops = filter(lambda x: op.formatted_name == x["name"], ops_with_unsupported_bool_args)
            for op_data in matching_ops:
                for idx in op_data["arg_idx"]:
                    args = list(sample.args)
                    if len(sample.args) > idx and isinstance(sample.args[idx], bool):
                        args[idx] = int(args[idx])
                    sample.args = tuple(args)

            outputs.append((variant, sample))

    return outputs
# types.LambdaType gave false positives
def is_lambda(lamb):
    """Return True iff *lamb* is an anonymous (lambda) function."""
    sample = lambda: 0  # noqa: E731
    return isinstance(lamb, type(sample)) and lamb.__name__ == sample.__name__