Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- .gitattributes +1 -0
- vila/lib/python3.10/site-packages/torch/__pycache__/__init__.cpython-310.pyc +0 -0
- vila/lib/python3.10/site-packages/torch/__pycache__/_appdirs.cpython-310.pyc +0 -0
- vila/lib/python3.10/site-packages/torch/__pycache__/_classes.cpython-310.pyc +0 -0
- vila/lib/python3.10/site-packages/torch/__pycache__/_compile.cpython-310.pyc +0 -0
- vila/lib/python3.10/site-packages/torch/__pycache__/_custom_ops.cpython-310.pyc +0 -0
- vila/lib/python3.10/site-packages/torch/__pycache__/_guards.cpython-310.pyc +0 -0
- vila/lib/python3.10/site-packages/torch/__pycache__/_jit_internal.cpython-310.pyc +0 -0
- vila/lib/python3.10/site-packages/torch/__pycache__/_namedtensor_internals.cpython-310.pyc +0 -0
- vila/lib/python3.10/site-packages/torch/__pycache__/_ops.cpython-310.pyc +0 -0
- vila/lib/python3.10/site-packages/torch/__pycache__/_python_dispatcher.cpython-310.pyc +0 -0
- vila/lib/python3.10/site-packages/torch/__pycache__/_storage_docs.cpython-310.pyc +0 -0
- vila/lib/python3.10/site-packages/torch/__pycache__/_streambase.cpython-310.pyc +0 -0
- vila/lib/python3.10/site-packages/torch/__pycache__/_tensor_str.cpython-310.pyc +0 -0
- vila/lib/python3.10/site-packages/torch/__pycache__/_utils.cpython-310.pyc +0 -0
- vila/lib/python3.10/site-packages/torch/__pycache__/_vmap_internals.cpython-310.pyc +0 -0
- vila/lib/python3.10/site-packages/torch/__pycache__/hub.cpython-310.pyc +0 -0
- vila/lib/python3.10/site-packages/torch/__pycache__/library.cpython-310.pyc +0 -0
- vila/lib/python3.10/site-packages/torch/__pycache__/serialization.cpython-310.pyc +0 -0
- vila/lib/python3.10/site-packages/torch/__pycache__/storage.cpython-310.pyc +0 -0
- vila/lib/python3.10/site-packages/torch/__pycache__/torch_version.cpython-310.pyc +0 -0
- vila/lib/python3.10/site-packages/torch/__pycache__/version.cpython-310.pyc +0 -0
- vila/lib/python3.10/site-packages/torch/_inductor/__init__.py +150 -0
- vila/lib/python3.10/site-packages/torch/_inductor/bounds.py +124 -0
- vila/lib/python3.10/site-packages/torch/_inductor/codecache.py +0 -0
- vila/lib/python3.10/site-packages/torch/_inductor/comm_analysis.py +273 -0
- vila/lib/python3.10/site-packages/torch/_inductor/comms.py +363 -0
- vila/lib/python3.10/site-packages/torch/_inductor/compile_fx.py +1451 -0
- vila/lib/python3.10/site-packages/torch/_inductor/config.py +752 -0
- vila/lib/python3.10/site-packages/torch/_inductor/constant_folding.py +264 -0
- vila/lib/python3.10/site-packages/torch/_inductor/cudagraph_trees.py +2159 -0
- vila/lib/python3.10/site-packages/torch/_inductor/cudagraph_utils.py +105 -0
- vila/lib/python3.10/site-packages/torch/_inductor/dependencies.py +506 -0
- vila/lib/python3.10/site-packages/torch/_inductor/exc.py +98 -0
- vila/lib/python3.10/site-packages/torch/_inductor/freezing.py +266 -0
- vila/lib/python3.10/site-packages/torch/_inductor/fx_passes/freezing_patterns.py +212 -0
- vila/lib/python3.10/site-packages/torch/_inductor/fx_passes/fuse_attention.py +786 -0
- vila/lib/python3.10/site-packages/torch/_inductor/fx_passes/pre_grad.py +611 -0
- vila/lib/python3.10/site-packages/torch/_inductor/fx_passes/reinplace.py +537 -0
- vila/lib/python3.10/site-packages/torch/_inductor/fx_utils.py +220 -0
- vila/lib/python3.10/site-packages/torch/_inductor/graph.py +1324 -0
- vila/lib/python3.10/site-packages/torch/_inductor/hooks.py +28 -0
- vila/lib/python3.10/site-packages/torch/_inductor/index_propagation.py +277 -0
- vila/lib/python3.10/site-packages/torch/_inductor/ir.py +0 -0
- vila/lib/python3.10/site-packages/torch/_inductor/metrics.py +419 -0
- vila/lib/python3.10/site-packages/torch/_inductor/ops_handler.py +655 -0
- vila/lib/python3.10/site-packages/torch/_inductor/optimize_indexing.py +118 -0
- vila/lib/python3.10/site-packages/torch/_inductor/quantized_lowerings.py +15 -0
- vila/lib/python3.10/site-packages/torch/_inductor/scheduler.py +2445 -0
- vila/lib/python3.10/site-packages/torch/_inductor/select_algorithm.py +1156 -0
.gitattributes
CHANGED
|
@@ -1072,3 +1072,4 @@ vila/lib/python3.10/site-packages/sympy/core/__pycache__/numbers.cpython-310.pyc
|
|
| 1072 |
vila/lib/python3.10/site-packages/sympy/core/__pycache__/function.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1073 |
vila/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/perm_groups.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1074 |
vila/lib/python3.10/site-packages/sympy/core/__pycache__/expr.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
| 1072 |
vila/lib/python3.10/site-packages/sympy/core/__pycache__/function.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1073 |
vila/lib/python3.10/site-packages/sympy/combinatorics/__pycache__/perm_groups.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1074 |
vila/lib/python3.10/site-packages/sympy/core/__pycache__/expr.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1075 |
+
vila/lib/python3.10/site-packages/torch/lib/libcusparseLt-f8b4a9fb.so.0 filter=lfs diff=lfs merge=lfs -text
|
vila/lib/python3.10/site-packages/torch/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (67.3 kB). View file
|
|
|
vila/lib/python3.10/site-packages/torch/__pycache__/_appdirs.cpython-310.pyc
ADDED
|
Binary file (21.8 kB). View file
|
|
|
vila/lib/python3.10/site-packages/torch/__pycache__/_classes.cpython-310.pyc
ADDED
|
Binary file (2.49 kB). View file
|
|
|
vila/lib/python3.10/site-packages/torch/__pycache__/_compile.cpython-310.pyc
ADDED
|
Binary file (1.09 kB). View file
|
|
|
vila/lib/python3.10/site-packages/torch/__pycache__/_custom_ops.cpython-310.pyc
ADDED
|
Binary file (12.8 kB). View file
|
|
|
vila/lib/python3.10/site-packages/torch/__pycache__/_guards.cpython-310.pyc
ADDED
|
Binary file (24 kB). View file
|
|
|
vila/lib/python3.10/site-packages/torch/__pycache__/_jit_internal.cpython-310.pyc
ADDED
|
Binary file (36.1 kB). View file
|
|
|
vila/lib/python3.10/site-packages/torch/__pycache__/_namedtensor_internals.cpython-310.pyc
ADDED
|
Binary file (4.95 kB). View file
|
|
|
vila/lib/python3.10/site-packages/torch/__pycache__/_ops.cpython-310.pyc
ADDED
|
Binary file (27 kB). View file
|
|
|
vila/lib/python3.10/site-packages/torch/__pycache__/_python_dispatcher.cpython-310.pyc
ADDED
|
Binary file (3.45 kB). View file
|
|
|
vila/lib/python3.10/site-packages/torch/__pycache__/_storage_docs.cpython-310.pyc
ADDED
|
Binary file (1.54 kB). View file
|
|
|
vila/lib/python3.10/site-packages/torch/__pycache__/_streambase.cpython-310.pyc
ADDED
|
Binary file (1.84 kB). View file
|
|
|
vila/lib/python3.10/site-packages/torch/__pycache__/_tensor_str.cpython-310.pyc
ADDED
|
Binary file (16.2 kB). View file
|
|
|
vila/lib/python3.10/site-packages/torch/__pycache__/_utils.cpython-310.pyc
ADDED
|
Binary file (25.8 kB). View file
|
|
|
vila/lib/python3.10/site-packages/torch/__pycache__/_vmap_internals.cpython-310.pyc
ADDED
|
Binary file (6.87 kB). View file
|
|
|
vila/lib/python3.10/site-packages/torch/__pycache__/hub.cpython-310.pyc
ADDED
|
Binary file (27.1 kB). View file
|
|
|
vila/lib/python3.10/site-packages/torch/__pycache__/library.cpython-310.pyc
ADDED
|
Binary file (18.2 kB). View file
|
|
|
vila/lib/python3.10/site-packages/torch/__pycache__/serialization.cpython-310.pyc
ADDED
|
Binary file (43.6 kB). View file
|
|
|
vila/lib/python3.10/site-packages/torch/__pycache__/storage.cpython-310.pyc
ADDED
|
Binary file (43.8 kB). View file
|
|
|
vila/lib/python3.10/site-packages/torch/__pycache__/torch_version.cpython-310.pyc
ADDED
|
Binary file (2.48 kB). View file
|
|
|
vila/lib/python3.10/site-packages/torch/__pycache__/version.cpython-310.pyc
ADDED
|
Binary file (433 Bytes). View file
|
|
|
vila/lib/python3.10/site-packages/torch/_inductor/__init__.py
ADDED
|
@@ -0,0 +1,150 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Any, Dict, List, Optional
|
| 2 |
+
|
| 3 |
+
import torch.fx
|
| 4 |
+
import torch.utils._pytree as pytree
|
| 5 |
+
|
| 6 |
+
__all__ = ["compile", "list_mode_options", "list_options", "cudagraph_mark_step_begin"]
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def compile(
|
| 10 |
+
gm: torch.fx.GraphModule,
|
| 11 |
+
example_inputs: List[torch.Tensor],
|
| 12 |
+
options: Optional[Dict[str, Any]] = None,
|
| 13 |
+
):
|
| 14 |
+
"""
|
| 15 |
+
Compile a given FX graph with TorchInductor. This allows compiling
|
| 16 |
+
FX graphs captured without using TorchDynamo.
|
| 17 |
+
|
| 18 |
+
Args:
|
| 19 |
+
gm: The FX graph to compile.
|
| 20 |
+
example_inputs: List of tensor inputs.
|
| 21 |
+
options: Optional dict of config options. See `torch._inductor.config`.
|
| 22 |
+
|
| 23 |
+
Returns:
|
| 24 |
+
Callable with same behavior as gm but faster.
|
| 25 |
+
"""
|
| 26 |
+
from .compile_fx import compile_fx
|
| 27 |
+
|
| 28 |
+
return compile_fx(gm, example_inputs, config_patches=options)
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def aot_compile(
|
| 32 |
+
gm: torch.fx.GraphModule,
|
| 33 |
+
example_inputs: List[torch.Tensor],
|
| 34 |
+
options: Optional[Dict[str, Any]] = None,
|
| 35 |
+
) -> str:
|
| 36 |
+
"""
|
| 37 |
+
Ahead-of-time compile a given FX graph with TorchInductor into a shared library.
|
| 38 |
+
|
| 39 |
+
Args:
|
| 40 |
+
gm: The FX graph to compile.
|
| 41 |
+
example_inputs: List of tensor inputs.
|
| 42 |
+
options: Optional dict of config options. See `torch._inductor.config`.
|
| 43 |
+
|
| 44 |
+
Returns:
|
| 45 |
+
Path to the generated shared library
|
| 46 |
+
"""
|
| 47 |
+
from .compile_fx import compile_fx_aot
|
| 48 |
+
|
| 49 |
+
# We will serialize the pytree info into the .so as constant strings
|
| 50 |
+
in_spec = None
|
| 51 |
+
out_spec = None
|
| 52 |
+
if isinstance(gm.graph._codegen, torch.fx.graph._PyTreeCodeGen):
|
| 53 |
+
codegen = gm.graph._codegen
|
| 54 |
+
gm.graph._codegen = torch.fx.graph.CodeGen()
|
| 55 |
+
gm.recompile()
|
| 56 |
+
|
| 57 |
+
if codegen.pytree_info.in_spec is not None:
|
| 58 |
+
in_spec = codegen.pytree_info.in_spec
|
| 59 |
+
if codegen.pytree_info.out_spec is not None:
|
| 60 |
+
out_spec = codegen.pytree_info.out_spec
|
| 61 |
+
|
| 62 |
+
else:
|
| 63 |
+
if hasattr(gm, "_in_spec"):
|
| 64 |
+
in_spec = gm._in_spec
|
| 65 |
+
if hasattr(gm, "_out_spec"):
|
| 66 |
+
out_spec = gm._out_spec
|
| 67 |
+
|
| 68 |
+
serialized_in_spec = pytree.treespec_dumps(in_spec) if in_spec is not None else ""
|
| 69 |
+
serialized_out_spec = (
|
| 70 |
+
pytree.treespec_dumps(out_spec) if out_spec is not None else ""
|
| 71 |
+
)
|
| 72 |
+
|
| 73 |
+
options = (
|
| 74 |
+
{
|
| 75 |
+
"aot_inductor.serialized_in_spec": serialized_in_spec,
|
| 76 |
+
"aot_inductor.serialized_out_spec": serialized_out_spec,
|
| 77 |
+
}
|
| 78 |
+
if options is None
|
| 79 |
+
else {
|
| 80 |
+
**options,
|
| 81 |
+
"aot_inductor.serialized_in_spec": serialized_in_spec,
|
| 82 |
+
"aot_inductor.serialized_out_spec": serialized_out_spec,
|
| 83 |
+
}
|
| 84 |
+
)
|
| 85 |
+
|
| 86 |
+
return compile_fx_aot(
|
| 87 |
+
gm,
|
| 88 |
+
example_inputs,
|
| 89 |
+
config_patches=options,
|
| 90 |
+
)
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
def list_mode_options(
|
| 94 |
+
mode: Optional[str] = None, dynamic: Optional[bool] = None
|
| 95 |
+
) -> Dict[str, Any]:
|
| 96 |
+
r"""Returns a dictionary describing the optimizations that each of the available
|
| 97 |
+
modes passed to `torch.compile()` performs.
|
| 98 |
+
|
| 99 |
+
Args:
|
| 100 |
+
mode (str, optional): The mode to return the optimizations for.
|
| 101 |
+
If None, returns optimizations for all modes
|
| 102 |
+
dynamic (bool, optional): Whether dynamic shape is enabled.
|
| 103 |
+
|
| 104 |
+
Example::
|
| 105 |
+
>>> torch._inductor.list_mode_options()
|
| 106 |
+
"""
|
| 107 |
+
|
| 108 |
+
mode_options: Dict[str, Dict[str, bool]] = {
|
| 109 |
+
"default": {},
|
| 110 |
+
# enable cudagraphs
|
| 111 |
+
"reduce-overhead": {
|
| 112 |
+
"triton.cudagraphs": True,
|
| 113 |
+
},
|
| 114 |
+
# enable max-autotune
|
| 115 |
+
"max-autotune-no-cudagraphs": {
|
| 116 |
+
"max_autotune": True,
|
| 117 |
+
},
|
| 118 |
+
# enable max-autotune
|
| 119 |
+
# enable cudagraphs
|
| 120 |
+
"max-autotune": {
|
| 121 |
+
"max_autotune": True,
|
| 122 |
+
"triton.cudagraphs": True,
|
| 123 |
+
},
|
| 124 |
+
}
|
| 125 |
+
return mode_options[mode] if mode else mode_options # type: ignore[return-value]
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
def list_options() -> List[str]:
|
| 129 |
+
r"""Returns a dictionary describing the optimizations and debug configurations
|
| 130 |
+
that are available to `torch.compile()`.
|
| 131 |
+
|
| 132 |
+
The options are documented in `torch._inductor.config`.
|
| 133 |
+
|
| 134 |
+
Example::
|
| 135 |
+
|
| 136 |
+
>>> torch._inductor.list_options()
|
| 137 |
+
"""
|
| 138 |
+
|
| 139 |
+
from torch._inductor import config
|
| 140 |
+
|
| 141 |
+
current_config: Dict[str, Any] = config.shallow_copy_dict()
|
| 142 |
+
|
| 143 |
+
return list(current_config.keys())
|
| 144 |
+
|
| 145 |
+
|
| 146 |
+
def cudagraph_mark_step_begin():
|
| 147 |
+
"Indicates that a new iteration of inference or training is about to begin."
|
| 148 |
+
from .cudagraph_trees import mark_step_begin
|
| 149 |
+
|
| 150 |
+
mark_step_begin()
|
vila/lib/python3.10/site-packages/torch/_inductor/bounds.py
ADDED
|
@@ -0,0 +1,124 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import operator
|
| 2 |
+
from functools import partial
|
| 3 |
+
from typing import Any, Callable, Dict
|
| 4 |
+
|
| 5 |
+
from sympy import Expr
|
| 6 |
+
|
| 7 |
+
import torch
|
| 8 |
+
from torch.utils._sympy.value_ranges import bound_sympy, ValueRangeAnalysis, ValueRanges
|
| 9 |
+
from .ir import InterpreterShim, LoopBody, LoopBodyBlock
|
| 10 |
+
from .utils import cache_on_self, dominated_nodes
|
| 11 |
+
from .virtualized import V
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class BoundVars:
|
| 15 |
+
"""
|
| 16 |
+
Performs Value Range Analysis on LoopBody's fx graph by calling BoundVars.run()
|
| 17 |
+
It exposes the ranges of the nodes in the `bounds` variable
|
| 18 |
+
|
| 19 |
+
Note. A current limitation of this analysis is that it just works on a per-loop basis.
|
| 20 |
+
We should be able to propagate the bounds between across the whole graph. This may benefit
|
| 21 |
+
the case a bounded variable is returned by a kernel and fed into another.
|
| 22 |
+
"""
|
| 23 |
+
|
| 24 |
+
def __init__(self, loop_body: LoopBody) -> None:
|
| 25 |
+
self.loop_body = loop_body
|
| 26 |
+
self.replacement_vals = {
|
| 27 |
+
k: ValueRanges[Expr](0, v - 1)
|
| 28 |
+
if (isinstance(v, int) or v.is_number)
|
| 29 |
+
else bound_sympy(v)
|
| 30 |
+
for k, v in loop_body.var_ranges.items()
|
| 31 |
+
}
|
| 32 |
+
# avoid computing these values, pessimistically assume that they are unbounded
|
| 33 |
+
self.unbounded_vars = dominated_nodes(
|
| 34 |
+
node
|
| 35 |
+
for node in self.loop_body.get_nodes()
|
| 36 |
+
if node.target in ["load", "reduction", operator.getitem]
|
| 37 |
+
or "masked_subblock" in node.target
|
| 38 |
+
)
|
| 39 |
+
# To access this variable call `get_bounds()`
|
| 40 |
+
self._bounds: Dict[torch.fx.Node, ValueRanges[Expr]] = {}
|
| 41 |
+
|
| 42 |
+
@cache_on_self
|
| 43 |
+
def get_bounds(self) -> Dict[torch.fx.Node, ValueRanges[Expr]]:
|
| 44 |
+
submodules = self.swap_submodules(self.loop_body.submodules)
|
| 45 |
+
|
| 46 |
+
# Initialize the environment with the unbounded variables
|
| 47 |
+
for node in self.unbounded_vars:
|
| 48 |
+
# we need to evaluate masked_subblock to recurse, and we need to set indirect values
|
| 49 |
+
if not isinstance(node.target, str) or (
|
| 50 |
+
"masked_subblock" not in node.target
|
| 51 |
+
and "set_indirect" not in node.target
|
| 52 |
+
):
|
| 53 |
+
self._bounds[node] = ValueRanges[Expr].unknown()
|
| 54 |
+
|
| 55 |
+
with V.set_ops_handler(ValueRangeAnalysis()):
|
| 56 |
+
interpreter = InterpreterShim(self.loop_body.root_block.graph, submodules)
|
| 57 |
+
interpreter.run(V.get_ops_handler(), initial_env=self._bounds)
|
| 58 |
+
return self._bounds
|
| 59 |
+
|
| 60 |
+
def swap_submodules(
|
| 61 |
+
self, submodules: Dict[str, Callable[..., Any]]
|
| 62 |
+
) -> Dict[str, Callable[..., ValueRanges[Expr]]]:
|
| 63 |
+
result: Dict[str, Callable[..., ValueRanges[Expr]]] = {}
|
| 64 |
+
for key in submodules.keys():
|
| 65 |
+
if key == "get_index":
|
| 66 |
+
result[key] = self.get_index
|
| 67 |
+
elif "masked_subblock" in key:
|
| 68 |
+
subblock = self.loop_body.subblocks[key]
|
| 69 |
+
# The result within the lambda will reference to the final
|
| 70 |
+
# set of modules at the end of the for-loop as it stores a reference to it
|
| 71 |
+
|
| 72 |
+
# bind subblock in a function because python lambdas close over by reference
|
| 73 |
+
# moving the lambda out of make_fn would close over the reference to subblock,
|
| 74 |
+
# so all lambdas would have the same subblock reference that is the final
|
| 75 |
+
# subblock in the loop
|
| 76 |
+
def make_fn(subblock):
|
| 77 |
+
return lambda mask, value: self.masked_subblock(
|
| 78 |
+
subblock, self._bounds, mask, value, result
|
| 79 |
+
)
|
| 80 |
+
|
| 81 |
+
result[key] = make_fn(subblock)
|
| 82 |
+
|
| 83 |
+
elif "set_indirect" in key:
|
| 84 |
+
idx = int(key[len("set_indirect") :])
|
| 85 |
+
var = self.loop_body.indirect_vars[idx]
|
| 86 |
+
indirect = partial(self.set_indirect, var)
|
| 87 |
+
result[key] = indirect
|
| 88 |
+
else:
|
| 89 |
+
assert "scan" in key
|
| 90 |
+
result[key] = submodules[key]
|
| 91 |
+
|
| 92 |
+
return result
|
| 93 |
+
|
| 94 |
+
def masked_subblock(
|
| 95 |
+
self,
|
| 96 |
+
subblock: LoopBodyBlock,
|
| 97 |
+
env: Dict[torch.fx.Node, ValueRanges[Expr]],
|
| 98 |
+
mask: Any,
|
| 99 |
+
value: Any,
|
| 100 |
+
submodules: Dict[str, Callable[..., Any]],
|
| 101 |
+
) -> ValueRanges[Expr]:
|
| 102 |
+
interp = InterpreterShim(subblock.graph, submodules)
|
| 103 |
+
interp.run(V.get_ops_handler(), initial_env=env)
|
| 104 |
+
output = [node for node in subblock.graph.nodes if node.target == "output"]
|
| 105 |
+
assert len(output) == 1
|
| 106 |
+
# dont bother unioning with value since the load from buffer will be
|
| 107 |
+
# pessimistically assumed to be inf anyway
|
| 108 |
+
return interp.env[output[0]]
|
| 109 |
+
|
| 110 |
+
def set_indirect(self, old: Expr, new: ValueRanges[Expr]) -> ValueRanges[Expr]:
|
| 111 |
+
assert isinstance(new, ValueRanges)
|
| 112 |
+
self.replacement_vals[old] = new
|
| 113 |
+
return new
|
| 114 |
+
|
| 115 |
+
def get_index(self, name: Expr) -> ValueRanges[Expr]:
|
| 116 |
+
expr = self.loop_body.indexing_exprs[name]
|
| 117 |
+
bound = self.replacement_vals.get(expr)
|
| 118 |
+
if bound is None:
|
| 119 |
+
bound = bound_sympy(expr, self.replacement_vals)
|
| 120 |
+
# The following assertion is true at the time of this writing
|
| 121 |
+
# We don't assert is as to not execute bound_sympy when bound is not None
|
| 122 |
+
# assert bound is None or bound == bound_sympy(expr, self.replacement_vals)
|
| 123 |
+
self.replacement_vals[name] = bound
|
| 124 |
+
return bound
|
vila/lib/python3.10/site-packages/torch/_inductor/codecache.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
vila/lib/python3.10/site-packages/torch/_inductor/comm_analysis.py
ADDED
|
@@ -0,0 +1,273 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import math
|
| 2 |
+
from enum import IntEnum
|
| 3 |
+
|
| 4 |
+
import sympy
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
from . import ir
|
| 8 |
+
|
| 9 |
+
from .utils import get_dtype_size, sympy_product
|
| 10 |
+
from .virtualized import V
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class NCCL_COLL(IntEnum):
|
| 14 |
+
ALL_REDUCE = 0
|
| 15 |
+
ALL_GATHER = 1
|
| 16 |
+
REDUCE_SCATTER = 2
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
class NVIDIA_GPU_TYPE(IntEnum):
|
| 20 |
+
VOLTA = 0
|
| 21 |
+
AMPERE = 1
|
| 22 |
+
HOPPER = 2
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def get_gpu_type() -> NVIDIA_GPU_TYPE:
|
| 26 |
+
gpu_info = torch.utils.collect_env.get_gpu_info(torch.utils.collect_env.run) or ""
|
| 27 |
+
if "V100" in gpu_info:
|
| 28 |
+
return NVIDIA_GPU_TYPE.VOLTA
|
| 29 |
+
elif "A100" in gpu_info:
|
| 30 |
+
return NVIDIA_GPU_TYPE.AMPERE
|
| 31 |
+
elif "H100" in gpu_info:
|
| 32 |
+
return NVIDIA_GPU_TYPE.HOPPER
|
| 33 |
+
else:
|
| 34 |
+
# for other gpu types, assume Ampere
|
| 35 |
+
return NVIDIA_GPU_TYPE.AMPERE
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
def get_collective_type(node: ir.IRNode) -> NCCL_COLL:
|
| 39 |
+
if isinstance(node, ir._CollectiveKernel):
|
| 40 |
+
kernel_name = node.python_kernel_name
|
| 41 |
+
assert kernel_name is not None
|
| 42 |
+
if "all_reduce" in kernel_name:
|
| 43 |
+
return NCCL_COLL.ALL_REDUCE
|
| 44 |
+
elif "all_gather" in kernel_name:
|
| 45 |
+
return NCCL_COLL.ALL_GATHER
|
| 46 |
+
elif "reduce_scatter" in kernel_name:
|
| 47 |
+
return NCCL_COLL.REDUCE_SCATTER
|
| 48 |
+
else:
|
| 49 |
+
raise Exception(f"Unsupported collective kernel: {kernel_name}")
|
| 50 |
+
|
| 51 |
+
if isinstance(node, (ir.AllReduce, ir.AllReduceCoalesced)):
|
| 52 |
+
return NCCL_COLL.ALL_REDUCE
|
| 53 |
+
elif isinstance(node, (ir.AllGatherIntoTensor, ir.AllGatherIntoTensorCoalesced)):
|
| 54 |
+
return NCCL_COLL.ALL_GATHER
|
| 55 |
+
elif isinstance(node, (ir.ReduceScatterTensor, ir.ReduceScatterTensorCoalesced)):
|
| 56 |
+
return NCCL_COLL.REDUCE_SCATTER
|
| 57 |
+
else:
|
| 58 |
+
raise Exception(f"Unsupported collective type: {node}")
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
def get_collective_input_size_bytes(node: ir.IRNode) -> int:
|
| 62 |
+
sz_bytes = 0
|
| 63 |
+
for inp in node.inputs: # type: ignore[attr-defined]
|
| 64 |
+
shape = inp.layout.size
|
| 65 |
+
numel = sympy_product(inp.layout.size)
|
| 66 |
+
if isinstance(numel, sympy.Integer):
|
| 67 |
+
# For ease of testing
|
| 68 |
+
numel = int(numel)
|
| 69 |
+
else:
|
| 70 |
+
numel = V.graph.sizevars.size_hint(numel)
|
| 71 |
+
sz_bytes += numel * get_dtype_size(inp.layout.dtype)
|
| 72 |
+
return sz_bytes
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
def get_collective_group_size(node: ir.IRNode) -> int:
|
| 76 |
+
if type(node) == ir._CollectiveKernel:
|
| 77 |
+
from torch.distributed.distributed_c10d import _get_group_size_by_name
|
| 78 |
+
|
| 79 |
+
return _get_group_size_by_name(node.constant_args[-1])
|
| 80 |
+
elif isinstance(node, ir.CollectiveKernel):
|
| 81 |
+
return node.constant_args[2] # type: ignore[attr-defined]
|
| 82 |
+
else:
|
| 83 |
+
raise TypeError(f"Unsupported collective type: {node}")
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
####################################################################################################################
|
| 87 |
+
# The following code and constants are adapted from https://github.com/NVIDIA/nccl/blob/master/src/graph/tuning.cc #
|
| 88 |
+
####################################################################################################################
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
class NCCL_HW(IntEnum):
|
| 92 |
+
NVLINK = 0
|
| 93 |
+
PCI = 1
|
| 94 |
+
NET = 2
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
class NCCL_ALGO(IntEnum):
|
| 98 |
+
TREE = 0
|
| 99 |
+
RING = 1
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
class NCCL_PROTO(IntEnum):
|
| 103 |
+
# The ordering and enum values here matches original in
|
| 104 |
+
# https://github.com/NVIDIA/nccl/blob/0b083e52096c387bad7a5c5c65b26a9dca54de8c/src/include/devcomm.h#L28
|
| 105 |
+
# For difference between these protocols, see https://github.com/NVIDIA/nccl/issues/281#issuecomment-571816990
|
| 106 |
+
LL = 0 # Low-latency
|
| 107 |
+
# LL128 = 1 # Low-latency 128-byte
|
| 108 |
+
# SIMPLE = 2
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
# Latencies in us
|
| 112 |
+
# len(NCCL_ALGO) x len(NCCL_PROTO)
|
| 113 |
+
# NOTE: use array instead of tensor to prevent incompatibility with fake mode
|
| 114 |
+
baseLat = [
|
| 115 |
+
# Tree
|
| 116 |
+
[
|
| 117 |
+
6.8, # LL
|
| 118 |
+
],
|
| 119 |
+
# Ring
|
| 120 |
+
[
|
| 121 |
+
6.6, # LL
|
| 122 |
+
],
|
| 123 |
+
]
|
| 124 |
+
|
| 125 |
+
# Latencies in us
|
| 126 |
+
# len(NCCL_HW) x len(NCCL_ALGO) x len(NCCL_PROTO)
|
| 127 |
+
hwLat = [
|
| 128 |
+
# NVLINK
|
| 129 |
+
[
|
| 130 |
+
[0.6], # Tree (LL)
|
| 131 |
+
[0.6], # Ring (LL)
|
| 132 |
+
],
|
| 133 |
+
# PCI
|
| 134 |
+
[
|
| 135 |
+
[1.0], # Tree (LL)
|
| 136 |
+
[1.0], # Ring (LL)
|
| 137 |
+
],
|
| 138 |
+
# NET
|
| 139 |
+
[
|
| 140 |
+
[5.0], # Tree (LL)
|
| 141 |
+
[2.7], # Ring (LL)
|
| 142 |
+
],
|
| 143 |
+
]
|
| 144 |
+
|
| 145 |
+
|
| 146 |
+
# LL128 max BW per channel
|
| 147 |
+
llMaxBws = [
|
| 148 |
+
# Volta-N1/Intel-N2/Intel-N4
|
| 149 |
+
[
|
| 150 |
+
39.0,
|
| 151 |
+
39.0,
|
| 152 |
+
20.4,
|
| 153 |
+
],
|
| 154 |
+
# Ampere-N1/AMD-N2/AMD-N4
|
| 155 |
+
[
|
| 156 |
+
87.7,
|
| 157 |
+
22.5, # avg of ring & tree
|
| 158 |
+
19.0,
|
| 159 |
+
],
|
| 160 |
+
# Hopper-N1/AMD-N2/AMD-N4
|
| 161 |
+
[
|
| 162 |
+
87.7,
|
| 163 |
+
22.5, # avg of ring & tree
|
| 164 |
+
19.0,
|
| 165 |
+
],
|
| 166 |
+
]
|
| 167 |
+
|
| 168 |
+
|
| 169 |
+
def estimate_nccl_collective_runtime(node: ir.IRNode) -> float:
|
| 170 |
+
"""
|
| 171 |
+
Returns estimated NCCL collective runtime in nanoseconds (ns).
|
| 172 |
+
|
| 173 |
+
The following heuristics are copied from https://github.com/NVIDIA/nccl/blob/master/src/graph/tuning.cc.
|
| 174 |
+
We aim to estimate the runtime as accurately as possible.
|
| 175 |
+
|
| 176 |
+
Assumptions:
|
| 177 |
+
- only ring algorithm (NCCL_ALGO_RING) is used
|
| 178 |
+
- only Low-Latency protocol (NCCL_PROTO_LL) is used, i.e. Simple or LL128 is not used
|
| 179 |
+
- 8 gpus per node # TODO: Need to find a way to get accurate "gpus per node" and "# nodes" info.
|
| 180 |
+
- collective is one of: allreduce, reducescatter, allgather
|
| 181 |
+
"""
|
| 182 |
+
tensor_storage_size_bytes = get_collective_input_size_bytes(node)
|
| 183 |
+
# Convert bytes to GB
|
| 184 |
+
tensor_storage_size_GB = tensor_storage_size_bytes / 1024 / 1024 / 1024
|
| 185 |
+
|
| 186 |
+
# Currently assumes each node has 8 gpus. And when >1 node is used, assumes each node uses all 8 gpus.
|
| 187 |
+
# TODO: Need to find a way to get accurate "gpus per node" and "# nodes" info.
|
| 188 |
+
num_gpus_per_node = 8
|
| 189 |
+
group_size = get_collective_group_size(node)
|
| 190 |
+
nNodes = math.ceil(group_size / num_gpus_per_node)
|
| 191 |
+
nRanks = group_size # this is total # of gpus globally that participate in this collective op
|
| 192 |
+
|
| 193 |
+
if nRanks <= 1:
|
| 194 |
+
return 0
|
| 195 |
+
|
| 196 |
+
# Assumes ring algorithm
|
| 197 |
+
nccl_algo = NCCL_ALGO.RING
|
| 198 |
+
nccl_proto = NCCL_PROTO.LL
|
| 199 |
+
coll = get_collective_type(node)
|
| 200 |
+
|
| 201 |
+
# =============== bandwidth computation ===============
|
| 202 |
+
# First compute bandwidth in GB/s; then at the end, convert it to GB/ns
|
| 203 |
+
|
| 204 |
+
bwIntra = torch._inductor.config.intra_node_bw
|
| 205 |
+
bwInter = torch._inductor.config.inter_node_bw
|
| 206 |
+
|
| 207 |
+
compCapIndex = get_gpu_type()
|
| 208 |
+
index2 = nNodes - 1 if nNodes <= 2 else 2
|
| 209 |
+
# LL: for single node, we look at GPU type; for multi-node, we look at CPU type
|
| 210 |
+
index1 = compCapIndex if nNodes == 1 else 0
|
| 211 |
+
llMaxBw = llMaxBws[index1][index2]
|
| 212 |
+
|
| 213 |
+
# NOTE: each step of ring algorithm is synchronized,
|
| 214 |
+
# and is bottlenecked by the slowest link which is the inter-node interconnect.
|
| 215 |
+
# hence when nNodes >= 2, bw is inter-node bandwidth.
|
| 216 |
+
# NOTE: the original code in https://github.com/NVIDIA/nccl/blob/master/src/graph/tuning.cc
|
| 217 |
+
# have this as `if nNodes <= 2` which seems wrong. Corrected it here.
|
| 218 |
+
bw = bwIntra if nNodes == 1 else bwInter
|
| 219 |
+
nChannels = 2 # Assume # channels is 2
|
| 220 |
+
busBw = nChannels * bw
|
| 221 |
+
|
| 222 |
+
# Various model refinements
|
| 223 |
+
busBw = min(
|
| 224 |
+
llMaxBw,
|
| 225 |
+
busBw
|
| 226 |
+
* (1.0 / 4.0 if (nNodes > 1 or coll == NCCL_COLL.ALL_REDUCE) else 1.0 / 3.0),
|
| 227 |
+
)
|
| 228 |
+
|
| 229 |
+
if coll == NCCL_COLL.ALL_REDUCE:
|
| 230 |
+
nsteps = 2 * (nRanks - 1)
|
| 231 |
+
elif coll in (NCCL_COLL.REDUCE_SCATTER, NCCL_COLL.ALL_GATHER):
|
| 232 |
+
nsteps = nRanks - 1
|
| 233 |
+
|
| 234 |
+
# Convert bus BW to algorithm BW (tensor bytes / algoBW = actual execution time)
|
| 235 |
+
ratio = (1.0 * nRanks) / nsteps # type: ignore[possibly-undefined]
|
| 236 |
+
bandwidth = busBw * ratio
|
| 237 |
+
# Convert GB/s to GB/ns
|
| 238 |
+
bandwidth_GB_per_ns = bandwidth / 1e9
|
| 239 |
+
|
| 240 |
+
# =============== latency computation ===============
|
| 241 |
+
intraHw = NCCL_HW.NVLINK
|
| 242 |
+
hw = intraHw if nNodes == 1 else NCCL_HW.NET
|
| 243 |
+
|
| 244 |
+
if coll == NCCL_COLL.ALL_REDUCE:
|
| 245 |
+
if nNodes > 1:
|
| 246 |
+
nInterSteps = 2 * nNodes
|
| 247 |
+
else:
|
| 248 |
+
nInterSteps = 0
|
| 249 |
+
elif coll in (NCCL_COLL.REDUCE_SCATTER, NCCL_COLL.ALL_GATHER):
|
| 250 |
+
nInterSteps = nNodes - 1
|
| 251 |
+
|
| 252 |
+
# First compute latency in us; then at the end, convert it to ns
|
| 253 |
+
latency = baseLat[nccl_algo][nccl_proto]
|
| 254 |
+
intraLat = hwLat[intraHw][nccl_algo][nccl_proto]
|
| 255 |
+
interLat = hwLat[NCCL_HW.NET][nccl_algo][nccl_proto]
|
| 256 |
+
|
| 257 |
+
# Inter-node rings still have to launch nsteps * net overhead.
|
| 258 |
+
netOverhead = 0.0
|
| 259 |
+
if nNodes > 1:
|
| 260 |
+
netOverhead = 1.0 # getNetOverhead(comm);
|
| 261 |
+
intraLat = max(intraLat, netOverhead)
|
| 262 |
+
latency += (nsteps - nInterSteps) * intraLat + nInterSteps * interLat # type: ignore[possibly-undefined]
|
| 263 |
+
# Convert us to ns
|
| 264 |
+
latency_ns = latency * 1e3
|
| 265 |
+
|
| 266 |
+
# =============== final result ===============
|
| 267 |
+
transport_ns = tensor_storage_size_GB / bandwidth_GB_per_ns
|
| 268 |
+
return transport_ns + latency_ns
|
| 269 |
+
|
| 270 |
+
|
| 271 |
+
################################################################################################################
|
| 272 |
+
# The above code and constants are adapted from https://github.com/NVIDIA/nccl/blob/master/src/graph/tuning.cc #
|
| 273 |
+
################################################################################################################
|
vila/lib/python3.10/site-packages/torch/_inductor/comms.py
ADDED
|
@@ -0,0 +1,363 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# pyre-strict
|
| 2 |
+
|
| 3 |
+
from typing import List
|
| 4 |
+
|
| 5 |
+
import torch
|
| 6 |
+
|
| 7 |
+
from . import config, ir, scheduler
|
| 8 |
+
from .dependencies import WeakDep
|
| 9 |
+
from .utils import tuple_sorted
|
| 10 |
+
|
| 11 |
+
overlap_log = torch._logging.getArtifactLogger(__name__, "overlap")
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def sink_waits(
    snodes: List["scheduler.BaseSchedulerNode"],
) -> List["scheduler.BaseSchedulerNode"]:
    """
    Greedily moves waits as late as possible (i.e. until we reach a use).
    Optimal in terms of communication overlap.
    """
    reordered: List["scheduler.BaseSchedulerNode"] = []
    pending_waits = set()
    for snode in snodes:
        if isinstance(snode.node, ir.Wait):
            # Defer the wait; it will be emitted just before its first user.
            pending_waits.add(snode)
            continue
        # Flush any deferred wait whose result this node consumes.
        for wait in tuple_sorted(pending_waits):
            if snode in wait.node_users:
                reordered.append(wait)
                pending_waits.remove(wait)
        reordered.append(snode)
    # Waits that were never consumed are emitted at the very end.
    reordered.extend(tuple_sorted(pending_waits))
    return reordered
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
def raise_comms(
    snodes: List["scheduler.BaseSchedulerNode"],
) -> List["scheduler.BaseSchedulerNode"]:
    """
    Greedily moves comms as early as possible (i.e. until we reach an input).
    Optimal in terms of communication overlap.

    TODO: We might want to adjust this in the future to account for memory limitations.
    e.g. when we are compiling FSDP, this heuristics will cause the all-gathers to be prefetched as soon as possible,
    which is the beginning of the forwards pass. We'll have to either do a special pass for FSDP,
    or we'll want to redo this pass with memory considerations so we handle the FSDP case in a general way.
    """
    # Walk the schedule backwards, carrying collectives with us until we hit
    # one of their producers; the reversed list is flipped at the end.
    new_order_reversed: List["scheduler.BaseSchedulerNode"] = []
    cur_comms: List["scheduler.BaseSchedulerNode"] = []
    for snode in reversed(snodes):
        if isinstance(snode.node, ir.CollectiveKernel):
            # Pick the collective up; it will be re-emitted right after
            # (in reversed order: right before) its producer.
            cur_comms.append(snode)
        else:
            for comm in cur_comms:
                assert len(comm.inverse_users) > 0
            # Emit every carried comm whose inputs include this node — in the
            # final (un-reversed) order the comm lands immediately after it.
            while len(cur_comms) > 0 and any(
                snode in comm.inverse_users for comm in cur_comms
            ):
                comm = cur_comms.pop(0)
                new_order_reversed.append(comm)
            new_order_reversed.append(snode)
    assert len(cur_comms) <= 1
    new_order_reversed.extend(tuple_sorted(cur_comms))
    return new_order_reversed[::-1]
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
def get_ancestors(node):
    """Return the set of all transitive predecessors of `node` (via `inverse_users`)."""
    seen = set()
    frontier = [node]
    while frontier:
        next_frontier = []
        for cur in frontier:
            for parent in cur.inverse_users:
                if parent not in seen:
                    seen.add(parent)
                    next_frontier.append(parent)
        frontier = next_frontier
    return seen
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
def get_descendants(node):
    """
    Return the set of all transitive successors of `node` (via `node_users`).

    The result does not include `node` itself.
    """
    # Fix: the original shadowed the `node` parameter with the inner loop
    # variable and named successors `inp` (copy-paste from get_ancestors);
    # renamed for clarity — traversal logic is unchanged.
    descendants = set()
    cur_nodes = [node]
    while len(cur_nodes) > 0:
        new_nodes = []
        for cur in cur_nodes:
            for user in cur.node_users:
                if user not in descendants:
                    descendants.add(user)
                    new_nodes.append(user)
        cur_nodes = new_nodes
    return descendants
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
def decide_global_ordering_of_comms(nodes: List["scheduler.BaseSchedulerNode"]):
    """
    Decide global ordering of comms, by just enforcing the ordering that's in the input graph
    (might not be the same ordering as the eager mode program).
    TODO: Come up with a better approach
    """
    comm_nodes = [n for n in nodes if isinstance(n.node, ir.CollectiveKernel)]
    # Chain consecutive comms: each comm weakly depends on its predecessor,
    # which pins the graph order as the global comm order.
    for prev, nxt in zip(comm_nodes, comm_nodes[1:]):
        nxt.add_fake_dep(WeakDep(prev.get_name()))
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
def assert_no_comm_nodes(snodes: List["scheduler.BaseSchedulerNode"]) -> None:
    """Assert that no scheduler node in `snodes` wraps a collective kernel."""
    assert all(not isinstance(snode.node, ir.CollectiveKernel) for snode in snodes)
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
def estimate_op_runtime(snode: "scheduler.BaseSchedulerNode") -> float:
    """
    Returns estimated op runtime in nanoseconds (ns)
    """
    estimator = config.estimate_op_runtime
    if estimator == "default":
        # Built-in estimate from the scheduler node itself.
        return snode.get_estimated_runtime()
    # Otherwise config supplies a user-provided callback taking the node.
    assert callable(estimator)
    return estimator(snode)
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
def reorder_compute_for_overlap(
    snodes: List["scheduler.BaseSchedulerNode"],
) -> List["scheduler.BaseSchedulerNode"]:
    """
    Decides a global ordering of all compute and communication nodes,
    assuming that we already have a global ordering of communication nodes.

    Overall scheduling procedure is:
        Step 1: Given that we've currently scheduled comm N, we now schedule all compute nodes
            that are required for comm N + 1 but do not depend on comm N, to run at the same time with comm N.
        Step 2: If all those compute nodes are sufficient to overlap comm N, we're done.
            Otherwise, we now need to look elsewhere to find compute that overlaps with comm N.
            We prioritize compute nodes that are needed sooner.
        Step 3: We schedule the compute nodes dependent on comm N and required for comm N + 1.
        Step 4: We schedule comm N + 1.
        Repeat this for subsequent comm nodes.
    """
    final_order = []

    comm_nodes = []
    for snode in snodes:
        if isinstance(snode.node, ir.CollectiveKernel):
            comm_nodes.append(snode)
    if len(comm_nodes) == 0:
        # if there is no comm nodes, return the current order
        return snodes

    # Per-comm transitive closures: everything a comm needs / everything that needs it.
    comm_ancestors = {node: get_ancestors(node) for node in comm_nodes}
    comm_descendants = {node: get_descendants(node) for node in comm_nodes}

    # Kahn-style bookkeeping: in-degree of each node over user edges,
    # restricted to nodes inside this schedule.
    indeg = dict.fromkeys(snodes, 0)
    for snode in snodes:
        for user in snode.node_users:
            if user in indeg:
                indeg[user] += 1
    ready_to_schedule_nodes = {node for node in snodes if indeg[node] == 0}

    unscheduled_nodes = set()
    unscheduled_nodes = set(snodes)

    def schedule_node(snode):
        """
        Schedule a single node.
        """
        assert snode in unscheduled_nodes
        assert snode in ready_to_schedule_nodes
        ready_to_schedule_nodes.remove(snode)
        unscheduled_nodes.remove(snode)
        final_order.append(snode)
        # Emitting this node may unblock its users.
        for user in tuple_sorted(snode.node_users):
            if user in indeg:
                indeg[user] -= 1
                if indeg[user] == 0:
                    ready_to_schedule_nodes.add(user)

    def schedule_nodes(snodes):
        """
        Schedules all nodes in `snodes` in an arbitrary topologically valid order.
        """
        all_nodes = set(snodes)
        assert all(node in unscheduled_nodes for node in all_nodes)
        while len(all_nodes) > 0:
            # NOTE: since model graph is always a DAG and does not have circular dependency inside,
            # there should be at least one node that is a "free node" (i.e. indeg == 0),
            # hence infinite loop is not possible. But we check here just to be safe.
            progress = False
            for node in tuple_sorted(all_nodes):
                if node in ready_to_schedule_nodes:
                    schedule_node(node)
                    all_nodes.remove(node)
                    progress = True
            if not progress:
                raise Exception(
                    "Unable to find a free node (indeg == 0). This is an impossible state to reach. "
                    "Please report a bug to PyTorch."
                )

    # First, schedule all compute nodes that are required by first comm node,
    # as well as the first comm node itself.
    assert len(comm_nodes) > 0
    schedule_nodes(
        list(comm_ancestors[comm_nodes[0]]) + [comm_nodes[0]],
    )

    # Compute cost from Step 2 of the previous iteration that can still be
    # counted as overlapping the current comm (see end of loop body).
    rolled_over_compute_cost = 0
    for idx in range(1, len(comm_ancestors)):
        # Step 1: Given that we've currently scheduled comm `idx-1`, we now schedule
        # all compute nodes that are required for comm `idx` but do not depend on comm `idx-1`,
        # to run at the same time with comm `idx-1`.
        needed_by_next_comm_and_ready_compute_nodes = unscheduled_nodes & (
            comm_ancestors[comm_nodes[idx]] - comm_descendants[comm_nodes[idx - 1]]
        )
        assert_no_comm_nodes(needed_by_next_comm_and_ready_compute_nodes)

        total_compute_runtime_cost = rolled_over_compute_cost + sum(
            [
                estimate_op_runtime(node)
                for node in needed_by_next_comm_and_ready_compute_nodes
            ]
        )
        prev_comm_runtime_cost = estimate_op_runtime(comm_nodes[idx - 1])
        schedule_nodes(tuple_sorted(needed_by_next_comm_and_ready_compute_nodes))

        # Step 2: If all those compute nodes are sufficient to overlap comm `idx-1`, we're done.
        # Otherwise, we now need to look elsewhere to find compute that overlaps with comm `idx`.
        # We prioritize compute nodes that are needed sooner.
        step1_runtime_cost = total_compute_runtime_cost
        if step1_runtime_cost >= prev_comm_runtime_cost:
            pass
        else:
            # Find all ready to schedule compute nodes that do not depend on comm `idx-1`.
            ready_to_schedule_compute_nodes = tuple_sorted(
                ready_to_schedule_nodes - comm_descendants[comm_nodes[idx - 1]]
            )
            assert_no_comm_nodes(ready_to_schedule_compute_nodes)

            def earliest_comm_descendant(node):
                # Index of the first comm that (transitively) needs `node`;
                # len(comm_nodes) if no comm needs it.
                for idx in range(len(comm_nodes)):
                    if node in comm_ancestors[comm_nodes[idx]]:
                        return idx
                return len(comm_nodes)

            # Prioritize compute nodes that are needed sooner.
            ready_to_schedule_compute_nodes = sorted(
                ready_to_schedule_compute_nodes, key=earliest_comm_descendant
            )

            for snode in ready_to_schedule_compute_nodes:
                if total_compute_runtime_cost >= prev_comm_runtime_cost:
                    # If accumulated compute runtime cost is greater than comm `idx-1` runtime cost,
                    # it means we have maximized overlap for comm `idx-1`, and hence we stop looking
                    # for more compute to schedule.
                    break
                compute_runtime_cost = estimate_op_runtime(snode)
                # If we're not able to leverage more than half of this
                # node's compute to overlap, we skip it.
                # TODO: Smarter heuristics here
                if (
                    prev_comm_runtime_cost - total_compute_runtime_cost
                ) <= compute_runtime_cost / 2:
                    continue
                schedule_node(snode)
                total_compute_runtime_cost += compute_runtime_cost
        rollable_compute_cost = total_compute_runtime_cost - step1_runtime_cost

        # Step 3: We schedule the compute nodes dependent on comm `idx-1` and required for comm `idx`.
        needed_by_next_comm_nodes = unscheduled_nodes & comm_ancestors[comm_nodes[idx]]
        schedule_nodes(list(needed_by_next_comm_nodes))

        # Step 4: We schedule comm `idx`.
        schedule_nodes([comm_nodes[idx]])

        is_prev_comm_blocking_next_comm = len(needed_by_next_comm_nodes) > 0
        # The idea here is that if there are no compute nodes from Step 3
        # (i.e. if prev comm is not blocking next comm), we can roll over the compute nodes
        # in Step 2 to overlap with the next comm, since they're not required to finish
        # before the next comm starts.
        if is_prev_comm_blocking_next_comm:
            rolled_over_compute_cost = 0
        else:
            rolled_over_compute_cost = rollable_compute_cost  # type: ignore[assignment]

    schedule_nodes(unscheduled_nodes)
    return final_order
|
| 287 |
+
|
| 288 |
+
|
| 289 |
+
def node_summary(snode):
    """One-line human-readable description of a scheduler node for overlap logs."""
    detail = ""
    if isinstance(snode.node, ir.ExternKernelOut):
        detail = f" ({snode.node.python_kernel_name})"
    out_tensor_info = ""
    layout = getattr(snode.node, "layout", None)
    if layout is not None and hasattr(layout, "size") and hasattr(layout, "stride"):
        out_tensor_info = f" (size={layout.size}, stride={layout.stride})"
    node_name = getattr(snode.node, "name", "")
    return f"{snode.node.__class__.__name__}{detail}{out_tensor_info} ({node_name})"
|
| 306 |
+
|
| 307 |
+
|
| 308 |
+
def visualize_overlap(order):
    """
    Log a textual timeline of `order` to the "overlap" artifact logger,
    prefixing compute ops that run while a collective is in flight with "| ",
    and accumulate an estimated total runtime (ns internally, logged as ms).
    """
    total_est_runtime: float = 0.0
    cur_comm_node = None  # the collective currently "in flight", if any
    for snode in order:
        if cur_comm_node is None:
            if isinstance(snode.node, ir.CollectiveKernel):
                # A collective starts; its runtime counts toward the total.
                total_est_runtime += estimate_op_runtime(snode)
                cur_comm_node = snode.node
            elif isinstance(snode.node, ir.Wait):
                raise Exception(
                    "Wait is not expected when there is no collective running"
                )
            else:  # exposed compute op
                total_est_runtime += estimate_op_runtime(snode)
            overlap_log.debug(f"{node_summary(snode)}")  # noqa: G004
        else:  # cur_comm_node is not None
            if isinstance(snode.node, ir.CollectiveKernel):
                raise Exception(
                    "Found two collectives running at the same time. "
                    "`visualize_overlap` needs to be updated to handle this case"
                )
            elif isinstance(snode.node, ir.Wait):  # end of this comm op
                overlap_log.debug(f"{node_summary(snode)}")  # noqa: G004
                cur_comm_node = None
            else:  # overlapped compute op
                # Overlapped compute does not add to the estimated runtime:
                # it is hidden behind the in-flight collective.
                overlap_log.debug(f"| {node_summary(snode)}")  # noqa: G004
    overlap_log.debug(
        f"Est. runtime (ms): {total_est_runtime / 1000 / 1000}"  # noqa: G004
    )
|
| 337 |
+
|
| 338 |
+
|
| 339 |
+
def reorder_compute_and_comm_for_overlap(
    snodes: List["scheduler.BaseSchedulerNode"],
) -> List["scheduler.BaseSchedulerNode"]:
    """
    Apply each pass named in `config.reorder_for_compute_comm_overlap_passes`
    (built-in passes are looked up by name in this module's globals) to the
    schedule, logging an overlap visualization before and after each pass.
    """
    order = snodes
    # Fix: the original called torch.distributed.get_rank() unconditionally,
    # which raises if the default process group is not initialized (e.g. when
    # this config is enabled outside a distributed run). Guard it so logging
    # is simply skipped in that case; reordering behavior is unchanged.
    dist = torch.distributed
    should_log = dist.is_available() and dist.is_initialized() and dist.get_rank() == 0
    for p in config.reorder_for_compute_comm_overlap_passes:
        if isinstance(p, str) and p in globals():
            p = globals()[p]  # it is a builtin pass
        if should_log:
            overlap_log.debug(
                f"==== Visualize overlap before reordering pass {p} ===="  # noqa: G004
            )
            try:
                visualize_overlap(order)
            except Exception as e:
                # Visualization is best-effort; never fail compilation over it.
                overlap_log.debug(str(e))
        order = p(order)  # type: ignore[operator]
        if should_log:
            overlap_log.debug(
                f"==== Visualize overlap after reordering pass {p} ===="  # noqa: G004
            )
            try:
                visualize_overlap(order)
            except Exception as e:
                overlap_log.debug(str(e))
    return order
|
vila/lib/python3.10/site-packages/torch/_inductor/compile_fx.py
ADDED
|
@@ -0,0 +1,1451 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import contextlib
|
| 2 |
+
import functools
|
| 3 |
+
import logging
|
| 4 |
+
import os
|
| 5 |
+
import sys
|
| 6 |
+
import time
|
| 7 |
+
import warnings
|
| 8 |
+
from itertools import count
|
| 9 |
+
|
| 10 |
+
from typing import (
|
| 11 |
+
Any,
|
| 12 |
+
Callable,
|
| 13 |
+
Dict,
|
| 14 |
+
FrozenSet,
|
| 15 |
+
List,
|
| 16 |
+
Optional,
|
| 17 |
+
Sequence,
|
| 18 |
+
Tuple,
|
| 19 |
+
Union,
|
| 20 |
+
)
|
| 21 |
+
from unittest import mock
|
| 22 |
+
|
| 23 |
+
from functorch.compile import min_cut_rematerialization_partition
|
| 24 |
+
|
| 25 |
+
import torch.fx
|
| 26 |
+
import torch.utils._pytree as pytree
|
| 27 |
+
from torch._dynamo import (
|
| 28 |
+
compiled_autograd,
|
| 29 |
+
config as dynamo_config,
|
| 30 |
+
logging as dynamo_logging,
|
| 31 |
+
utils as dynamo_utils,
|
| 32 |
+
)
|
| 33 |
+
from torch._dynamo.utils import (
|
| 34 |
+
counters,
|
| 35 |
+
detect_fake_mode,
|
| 36 |
+
lazy_format_graph_code,
|
| 37 |
+
optimus_scuba_log,
|
| 38 |
+
)
|
| 39 |
+
from torch._functorch.aot_autograd import aot_export_module, make_boxed_func
|
| 40 |
+
from torch._inductor.codecache import code_hash, CompiledFxGraph, FxGraphCache
|
| 41 |
+
from torch._inductor.cudagraph_utils import BoxedDeviceIndex
|
| 42 |
+
|
| 43 |
+
from torch._inductor.debug import save_args_for_compile_fx_inner
|
| 44 |
+
from torch._inductor.utils import BoxedBool, count_tangents
|
| 45 |
+
from torch._logging import trace_structured
|
| 46 |
+
from torch._ops import OpOverload
|
| 47 |
+
from torch._subclasses.fake_tensor import FakeTensor
|
| 48 |
+
from torch._utils_internal import signpost_event
|
| 49 |
+
from torch.fx.passes.fake_tensor_prop import FakeTensorProp
|
| 50 |
+
|
| 51 |
+
from .._dynamo.backends.common import aot_autograd
|
| 52 |
+
from ..fx._lazy_graph_module import _use_lazy_graph_module # type: ignore[attr-defined]
|
| 53 |
+
from ..fx.graph import _PyTreeCodeGen
|
| 54 |
+
from . import config, metrics
|
| 55 |
+
from .debug import DebugContext
|
| 56 |
+
from .decomposition import select_decomp_table
|
| 57 |
+
from .fx_passes.joint_graph import joint_graph_passes
|
| 58 |
+
from .fx_passes.post_grad import post_grad_passes, view_to_reshape
|
| 59 |
+
from .fx_passes.pre_grad import pre_grad_passes
|
| 60 |
+
from .graph import GraphLowering
|
| 61 |
+
from .ir import ExternKernelNode
|
| 62 |
+
from .utils import get_dtype_size, has_incompatible_cudagraph_ops, output_node
|
| 63 |
+
from .virtualized import V
|
| 64 |
+
|
| 65 |
+
if config.is_fbcode():
    # Internal (Meta) builds get the real timing/logging decorator.
    from torch._inductor.fb.utils import time_and_log
else:
    # OSS builds: no-op decorator with the same signature, so call sites
    # can use @time_and_log(...) unconditionally.
    def time_and_log(attr: str, extra_loggings: Optional[Dict[str, str]] = None):
        return dynamo_utils.identity
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
log = logging.getLogger(__name__)
|
| 74 |
+
perf_hint_log = torch._logging.getArtifactLogger(__name__, "perf_hints")
|
| 75 |
+
post_grad_graphs_log = torch._logging.getArtifactLogger(__name__, "post_grad_graphs")
|
| 76 |
+
ALIGNMENT = 16
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
# copy_ fails when trying to write to tensors with memory overlap,
|
| 80 |
+
# for expanded dimensions (a dimension which used to have size 1 -> ?)
|
| 81 |
+
# we can select one element from that dimension and write to it
|
| 82 |
+
# to achieve writing to all values of that dimension of the input tensor
|
| 83 |
+
# copy_ fails when trying to write to tensors with memory overlap,
# for expanded dimensions (a dimension which used to have size 1 -> ?)
# we can select one element from that dimension and write to it
# to achieve writing to all values of that dimension of the input tensor
def get_expanded_dims(t):
    """
    Return the indices of dims of `t` produced by expand(): stride 0 with
    size != 1. Returns None for non-tensor inputs.
    """
    if not isinstance(t, torch.Tensor):
        return None
    expanded = []
    for dim in range(t.ndim):
        if t.stride(dim) == 0 and t.size(dim) != 1:
            expanded.append(dim)
    return expanded
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
def index_expanded_dims(t: torch.Tensor, expanded_dims: List[int]) -> torch.Tensor:
    """
    Slice `t` down to a single element along each dim in `expanded_dims`,
    collapsing expanded (stride-0) dimensions to size 1.
    """
    result = t
    for dim in expanded_dims:
        result = torch.ops.aten.slice(result, dim, 0, 1)
    return result
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
def complex_memory_overlap(t: torch.Tensor) -> bool:
    """
    Heuristically decide whether `t` has internal memory overlap beyond
    benign expanded dims: expanded (stride-0) dims are sliced away first,
    then if torch._debug_has_internal_overlap is unsure we check whether
    any dim's stride is smaller than the extent of all smaller-stride dims.
    """
    # if torch._debug_has_internal_overlap thinks this tensor potentially has
    # memory overlap internally, let's dig deeper to find out whether it's true.
    t = index_expanded_dims(t, get_expanded_dims(t))
    if torch._debug_has_internal_overlap(t) == 0:
        return False
    # Walk dims from smallest stride to largest (ties by dim index, matching
    # a stable sort); overlap exists iff some stride is smaller than the
    # cumulative extent covered by the previous dim.
    order = sorted(range(t.ndim), key=lambda d: t.stride(d))
    prev_stride = 1
    prev_size = 1
    for d in order:
        if t.stride(d) < prev_stride * prev_size:
            return True
        prev_stride = t.stride(d)
        prev_size = t.size(d)
    return False
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
@functools.lru_cache(None)
def _step_logger():
    # Lazily create (and cache) the dynamo step logger bound to this
    # module's logger; cached so repeated calls return the same callable.
    return dynamo_logging.get_step_logger(log)
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
@functools.lru_cache(None)
|
| 118 |
+
def _warn_tf32_disabled():
|
| 119 |
+
if (
|
| 120 |
+
torch.cuda.is_available()
|
| 121 |
+
and not torch.backends.cuda.matmul.allow_tf32
|
| 122 |
+
and torch.cuda.get_device_capability() >= (8, 0)
|
| 123 |
+
):
|
| 124 |
+
warnings.warn(
|
| 125 |
+
"TensorFloat32 tensor cores for float32 matrix multiplication available but not enabled. "
|
| 126 |
+
"Consider setting `torch.set_float32_matmul_precision('high')` for better performance."
|
| 127 |
+
)
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
def _unlift_graph(mod, gm, graph_signature):
    """Convert an export-style lifted graph back into an unlifted GraphModule.

    Export lifts parameters/buffers into graph inputs; this re-attaches them
    as attributes on *gm* (via _assign_attr), records which placeholders
    correspond to which state names, records which outputs mutate buffers,
    and calls torch.export._unlift to produce the unlifted module.

    Args:
        mod: the original nn.Module providing parameters/buffers.
        gm: the lifted torch.fx.GraphModule produced by export.
        graph_signature: export graph signature with the input/output maps.

    Returns:
        The unlifted torch.fx.GraphModule.
    """
    from torch.export.unflatten import _assign_attr, _AttrKind

    # Re-attach params/buffers as attributes on gm and collect a state dict
    # (remove_duplicate=False keeps shared/tied entries under every name).
    state_dict = {}
    for name, param in mod.named_parameters(remove_duplicate=False):
        state_dict[name] = param
        _assign_attr(
            param,
            gm,
            name,
            attr_kind=_AttrKind.PARAMETER,
        )
    for name, buffer in mod.named_buffers(remove_duplicate=False):
        state_dict[name] = buffer
        _assign_attr(
            buffer,
            gm,
            name,
            attr_kind=_AttrKind.BUFFER,
        )

    # For each placeholder: the param/buffer FQN it was lifted from, or None
    # for genuine user inputs.
    placeholder_nodes = [node for node in gm.graph.nodes if node.op == "placeholder"]
    lifted_inputs = []
    for node in placeholder_nodes:
        node_name = node.name
        if node_name in graph_signature.inputs_to_parameters:
            lifted_inputs.append(graph_signature.inputs_to_parameters[node_name])
        elif node_name in graph_signature.inputs_to_buffers:
            lifted_inputs.append(graph_signature.inputs_to_buffers[node_name])
        else:
            assert node_name in graph_signature.user_inputs
            lifted_inputs.append(None)

    from torch.export._unlift import _unlift

    # For each graph output: the buffer FQN it mutates, or None.
    # BUGFIX: buffers_to_mutate is keyed by output-node *names* (the lookup
    # below uses out.name), so membership must be tested with out.name too;
    # testing the Node object itself was always False, silently dropping
    # every buffer mutation.
    outputs = list(gm.graph.nodes)[-1].args[0]
    mutated_outputs = []
    for out in outputs:
        if out.name in graph_signature.buffers_to_mutate:
            mutated_outputs.append(graph_signature.buffers_to_mutate[out.name])
        else:
            mutated_outputs.append(None)

    unlifted_gm = _unlift(
        gm,
        lifted_inputs,
        mutated_outputs,
        pytree.LeafSpec(),
        None,
        state_dict,
        {},
    )
    return unlifted_gm
|
| 183 |
+
|
| 184 |
+
|
| 185 |
+
def _get_subgraph_names(gm):
|
| 186 |
+
for node in gm.graph.nodes:
|
| 187 |
+
if node.target == torch.ops.higher_order.cond:
|
| 188 |
+
true_subgraph_name = node.args[1].name
|
| 189 |
+
false_subgraph_name = node.args[2].name
|
| 190 |
+
yield true_subgraph_name
|
| 191 |
+
yield false_subgraph_name
|
| 192 |
+
|
| 193 |
+
|
| 194 |
+
def _recursive_pre_grad_passes(gm, example_inputs):
    """Run pre_grad_passes on *gm* and, recursively, on every cond subgraph."""
    for name in _get_subgraph_names(gm):
        child = getattr(gm, name)
        # No per-subgraph example inputs are available, so pass None.
        setattr(gm, name, _recursive_pre_grad_passes(child, example_inputs=None))
    return pre_grad_passes(gm, example_inputs)
|
| 201 |
+
|
| 202 |
+
|
| 203 |
+
def _recursive_joint_graph_passes(gm):
    """Run joint_graph_passes on *gm* and, recursively, on every cond subgraph."""
    for name in _get_subgraph_names(gm):
        _recursive_joint_graph_passes(getattr(gm, name))
    joint_graph_passes(gm)
|
| 208 |
+
|
| 209 |
+
|
| 210 |
+
def _recursive_post_grad_passes(gm, is_inference: bool = False):
    """Run post_grad_passes on *gm* and, recursively, on every cond subgraph."""
    for name in _get_subgraph_names(gm):
        _recursive_post_grad_passes(getattr(gm, name), is_inference)
    post_grad_passes(gm, is_inference)
|
| 215 |
+
|
| 216 |
+
|
| 217 |
+
def split_const_gm(
    gm: torch.fx.GraphModule,
) -> Tuple[torch.fx.GraphModule, Dict[str, int]]:
    """
    This function takes an GraphModule input "gm".
    The gm will be split into 2 components,
      1) const_gm, which consists the subgraph of gm that can be constant folded.
      2) gm (being inplace modified,) which returns the graph after constant folding.

    const_output_index is a mapping of corresponding node name from gm to the
    output index of const_gm.
    Returns (const_gm, const_output_index)
    """
    from torch._inductor.constant_folding import (
        CONST_MODULE_TAG,
        META_TAG,
        MODULE_TAG,
        replace_node_with_constant,
        run_and_get_constant_graph,
    )

    # Extract the constant-foldable subgraph and evaluate it eagerly; the
    # results become constants embedded into gm below.
    const_gm = run_and_get_constant_graph(gm)
    const_result = const_gm()

    # Map each const_gm output node name -> its position in the output tuple.
    const_outputs = {
        x.name: idx for idx, x in enumerate(tuple(const_gm.graph.nodes)[-1].args[0])
    }

    # Partition gm's nodes: boundary nodes (outputs of the const subgraph)
    # get replaced with constants; interior const-only nodes get erased.
    to_erase_node = []
    to_replace_node = []
    const_output_index = {}
    for node in gm.graph.nodes:
        if node.name in const_outputs:
            to_replace_node.append(node)
        elif node.meta[META_TAG] == CONST_MODULE_TAG:
            to_erase_node.append(node)

    for node in to_replace_node:
        new_const_name = "_FOLDED_CONST_" + node.name
        replace_node_with_constant(
            gm,
            node,
            const_result[const_outputs[node.name]],
            new_const_name,
        )
        const_output_index[new_const_name] = const_outputs[node.name]
    # Erase in reverse topological order so users are removed before their
    # producers; any surviving user must belong to the non-const module part.
    for node in to_erase_node[::-1]:
        if node.users:
            for n in node.users:
                assert n.meta[META_TAG] == MODULE_TAG, f"node: {node} user not empty."
        else:
            gm.graph.erase_node(node)
    gm.recompile()

    return const_gm, const_output_index
|
| 272 |
+
|
| 273 |
+
|
| 274 |
+
def is_tf32_warning_applicable(gm: torch.fx.GraphModule):
    """Return True if *gm* contains a float32 CUDA matmul-family op, i.e.
    a case where the TF32-disabled warning is relevant."""
    aten = torch.ops.aten
    matmul_targets = {
        aten.mm.default,
        aten.addmm.default,
        aten.bmm.default,
        aten.baddbmm.default,
    }
    for node in gm.graph.nodes:
        if node.op != "call_function" or node.target not in matmul_targets:
            continue
        val = node.meta.get("val", None)
        if (
            isinstance(val, torch.Tensor)
            and val.dtype == torch.float32
            and val.device.type == "cuda"
        ):
            return True
    return False
|
| 292 |
+
|
| 293 |
+
|
| 294 |
+
@DebugContext.wrap
def count_bytes_inner(
    gm: torch.fx.GraphModule,
    example_inputs: List[torch.Tensor],
    num_fixed: int = 0,
    **kwargs,
):
    """Lower *gm* only to count bytes accessed / element counts / estimated
    runtimes, accumulating them into the module-level `metrics`, then return
    the original (uncompiled) forward as a boxed callable.

    Used as a lightweight "backend" for bandwidth accounting, not for codegen.
    `**kwargs` absorbs and ignores any extra backend arguments.
    """
    shape_env = _shape_env_from_inputs(example_inputs)
    fake_mode = fake_tensor_prop(gm, example_inputs)

    # Post-grad passes must run under the fake mode so meta propagation works.
    with V.set_fake_mode(fake_mode):
        _recursive_post_grad_passes(gm, False)

    graph = GraphLowering(gm, shape_env=shape_env, num_static_inputs=num_fixed)
    with V.set_graph_handler(graph), V.set_real_inputs(example_inputs):
        graph.run(*example_inputs)
        num_bytes, nodes_num_elem, node_runtimes = graph.count_bytes()
        metrics.num_bytes_accessed += num_bytes
        metrics.nodes_num_elem += nodes_num_elem
        metrics.node_runtimes += node_runtimes
    # Not compiled: hand back eager forward in the boxed calling convention.
    return make_boxed_func(gm.forward)
|
| 315 |
+
|
| 316 |
+
|
| 317 |
+
def fake_tensor_prop(
|
| 318 |
+
gm: torch.fx.GraphModule,
|
| 319 |
+
example_inputs: List[torch.Tensor],
|
| 320 |
+
force_allow_non_fake_inputs: bool = False,
|
| 321 |
+
):
|
| 322 |
+
"""
|
| 323 |
+
If we can not detect fake mode from the context of inputs, create one.
|
| 324 |
+
|
| 325 |
+
The created fake mode will be returned.
|
| 326 |
+
"""
|
| 327 |
+
fake_mode = detect_fake_mode(example_inputs)
|
| 328 |
+
if not fake_mode:
|
| 329 |
+
fake_mode = torch._subclasses.FakeTensorMode(allow_non_fake_inputs=True)
|
| 330 |
+
FakeTensorProp(gm, mode=fake_mode).propagate(*example_inputs)
|
| 331 |
+
else:
|
| 332 |
+
ctx = (
|
| 333 |
+
contextlib.nullcontext()
|
| 334 |
+
if not force_allow_non_fake_inputs
|
| 335 |
+
else mock.patch.object(fake_mode, "allow_non_fake_inputs", True)
|
| 336 |
+
)
|
| 337 |
+
with ctx: # type: ignore[attr-defined]
|
| 338 |
+
FakeTensorProp(gm, mode=fake_mode).propagate_dont_convert_inputs(
|
| 339 |
+
*example_inputs
|
| 340 |
+
)
|
| 341 |
+
|
| 342 |
+
return fake_mode
|
| 343 |
+
|
| 344 |
+
|
| 345 |
+
# pass config dict back to user
|
| 346 |
+
def get_patched_config_dict(config_patches=None) -> Dict[str, Any]:
|
| 347 |
+
with config.patch(config_patches):
|
| 348 |
+
return config.get_config_copy()
|
| 349 |
+
|
| 350 |
+
|
| 351 |
+
@DebugContext.wrap
@torch.utils._python_dispatch._disable_current_modes()
@time_and_log(
    attr="compilation time (in seconds)",
    # NOTE: evaluated once at decoration time, so this logs the config as of
    # module import, not per call.
    extra_loggings={"config_dict": str(get_patched_config_dict())},
)
# Need this decorator for compile_fx_inner even if we already have one for
# compile_fx. The reason is the compilation for backward graph may happen after
# compile_fx return and we may want to use the _LazyGraphModule for compiling
# the backward graph as well.
@_use_lazy_graph_module(dynamo_config.use_lazy_graph_module)
@dynamo_utils.dynamo_timed(phase_name="inductor_compile")
def compile_fx_inner(
    gm: torch.fx.GraphModule,
    example_inputs: List[torch.Tensor],
    cudagraphs: Optional[BoxedBool] = None,
    num_fixed: int = 0,
    is_backward: bool = False,
    graph_id: Optional[int] = None,
    cpp_wrapper: bool = False,
    aot_mode: bool = False,
    is_inference: bool = False,
    boxed_forward_device_index: Optional[BoxedDeviceIndex] = None,
    user_visible_outputs: FrozenSet[str] = frozenset(),
    layout_opt: Optional[bool] = None,
    extern_node_serializer: Optional[Callable[[List[ExternKernelNode]], Any]] = None,
):
    """
    Inductor API that compiles a single graph.

    If you change the argument list for this function, make sure you
    also update the call to save_args_for_compile_fx_inner below accordingly.
    """
    # Trivial graph (no calls): skip compilation entirely and run eagerly.
    if dynamo_utils.count_calls(gm.graph) == 0 and not aot_mode:
        # trigger the real recompilation for _LazyGraphModule before returning
        # the forward method.
        from torch.fx._lazy_graph_module import _LazyGraphModule

        _LazyGraphModule.force_recompile(gm)
        return make_boxed_func(gm.forward)

    assert isinstance(
        next(iter(reversed(gm.graph.nodes))).args[0], (tuple, list)
    ), f"inductor can only compile FX graphs which return a tuple/list, but got {gm.graph}"

    if config.save_args:
        save_args_for_compile_fx_inner(
            gm,
            example_inputs,
            cudagraphs=cudagraphs,
            num_fixed=num_fixed,
            is_backward=is_backward,
            graph_id=graph_id,
            cpp_wrapper=cpp_wrapper,
            aot_mode=aot_mode,
            is_inference=is_inference,
            boxed_forward_device_index=boxed_forward_device_index,
            user_visible_outputs=user_visible_outputs,
            layout_opt=layout_opt,
        )

    if cudagraphs is None:
        cudagraphs = BoxedBool(config.triton.cudagraphs)

    # Inputs to fx_codegen_and_compile
    # Anything that affects codegen should go here, so if the signature
    # of fx_codegen_and_compile changes, the dict should be updated accordingly
    graph_kwargs = {
        "cudagraphs": cudagraphs,
        "num_fixed": num_fixed,
        "is_backward": is_backward,
        "graph_id": graph_id,
        "cpp_wrapper": cpp_wrapper,
        "aot_mode": aot_mode,
        "is_inference": is_inference,
        "user_visible_outputs": user_visible_outputs,
        "layout_opt": layout_opt,
        "extern_node_serializer": extern_node_serializer,
    }

    start = time.time()

    # AOT mode bypasses the FX graph cache (it produces a path, not a graph).
    if config.fx_graph_cache and not aot_mode:
        compiled_graph = FxGraphCache.load(
            fx_codegen_and_compile, gm, example_inputs, graph_kwargs
        )
    else:
        compiled_graph = fx_codegen_and_compile(
            gm, example_inputs, **graph_kwargs  # type: ignore[arg-type]
        )

    log.debug("FX codegen and compilation took %.3fs", time.time() - start)

    # check cudagraph disabling reasons from inductor lowering
    if cudagraphs and compiled_graph.disabled_cudagraphs_reason:
        perf_hint_log.warning(
            "skipping cudagraphs due to %s", compiled_graph.disabled_cudagraphs_reason
        )
        BoxedBool.disable(cudagraphs)

    # Return the output strides to the caller via TracingContext
    context = torch._guards.TracingContext.try_get()
    if context is not None and context.output_strides is not None:
        assert len(context.output_strides) == 0
        context.output_strides.extend(compiled_graph.output_strides)

    if aot_mode:
        return compiled_graph

    if cudagraphs:
        # output args are tuple of first argument
        output = output_node(gm)
        assert len(output.args) == 1
        stack_traces = [
            (arg.stack_trace if isinstance(arg, torch.fx.node.Node) else None)
            for arg in output.args[0]
        ]

        # copy_ into overlapping memory is unsafe under cudagraphs; detect it.
        complex_memory_overlap_inputs = any(
            complex_memory_overlap(t)
            for t in example_inputs
            if isinstance(t, torch.Tensor)
        )

        from torch._inductor.cudagraph_utils import check_for_mutation

        has_mutation_str = check_for_mutation(gm, compiled_graph, num_fixed)
        has_mutation = has_mutation_str is not None

        if has_mutation:
            compiled_graph.disabled_cudagraphs_reason = has_mutation_str

        # Each entry: (condition that must hold, reason logged if it doesn't).
        cudagraph_tests = [
            (not has_mutation, "mutated inputs"),
            (not has_incompatible_cudagraph_ops(gm), "incompatible ops"),
            (not complex_memory_overlap_inputs, "complex memory overlap"),
            (
                all(
                    isinstance(t, (torch.Tensor, torch.SymInt)) for t in example_inputs
                ),
                "non-Tensor inputs",
            ),
        ]
        cudagraph_fail_reasons = [s for b, s in cudagraph_tests if not b]

        if not cudagraph_fail_reasons:
            if not config.triton.cudagraph_trees:
                # Force specialize all inputs so that CUDA graphs will work
                for t in example_inputs:
                    if isinstance(t, torch.SymInt):
                        int(t)  # guard

            if (
                boxed_forward_device_index is not None
                and not is_inference
                and not is_backward
            ):
                boxed_forward_device_index.set(next(iter(compiled_graph.device_idxs)))

            compiled_graph.current_callable = cudagraphify(
                compiled_graph.get_current_callable(),
                example_inputs,
                static_input_idxs=range(num_fixed),
                device_index=next(iter(compiled_graph.device_idxs)),
                stack_traces=stack_traces,
                is_backward=is_backward,
                is_inference=is_inference,
                constants=tuple(compiled_graph.constants.values()),
            )
        else:
            BoxedBool.disable(cudagraphs)

            # See [Backward Generation Handling]
            # if cudagraph'd the forward and set the device, we need to let the cudagraph manager
            # know we are we running the backward even if we will not run it in cudagraphs
            if is_backward and config.triton.cudagraph_trees:
                assert boxed_forward_device_index is not None
                assert boxed_forward_device_index.value is not None
                compiled_graph_callable = compiled_graph.get_current_callable()

                manager = torch._inductor.cudagraph_trees.get_manager(
                    boxed_forward_device_index.value, create_if_none_exists=False
                )
                # should already exist from forward
                assert manager is not None

                def compiled_artifact(new_inputs):
                    manager.set_to_running_backward()
                    return compiled_graph_callable(new_inputs)

                compiled_graph.current_callable = compiled_artifact

            if "cuda" in compiled_graph.device_types:
                # prefer better disable_cudagraphs_reason bc stack trace
                # TODO: migrate all disable reasons to stack trace, refactor
                if compiled_graph.disabled_cudagraphs_reason:
                    perf_hint_log.warning(compiled_graph.disabled_cudagraphs_reason)
                else:
                    perf_hint_log.warning(
                        "skipping cudagraphs due to %s", cudagraph_fail_reasons
                    )

    # cudagraphs does its own aligning of inputs
    if not cudagraphs:
        new_callable = align_inputs(
            compiled_graph.get_current_callable(), example_inputs, range(num_fixed)
        )
        if new_callable is not compiled_graph.get_current_callable():
            compiled_graph.current_callable = new_callable

    _step_logger()(
        logging.INFO,
        "torchinductor done compiling "
        f"{'BACKWARDS' if is_backward else 'FORWARDS'} "
        f"graph {graph_id}",
    )

    # aot autograd needs to know to pass in inputs as a list
    compiled_graph._boxed_call = True
    return compiled_graph
|
| 571 |
+
|
| 572 |
+
|
| 573 |
+
def fx_codegen_and_compile(
    gm: torch.fx.GraphModule,
    example_inputs: List[torch.Tensor],
    cudagraphs: Optional[BoxedBool] = None,
    num_fixed: int = 0,
    is_backward: bool = False,
    graph_id: Optional[int] = None,
    cpp_wrapper: bool = False,
    aot_mode: bool = False,
    is_inference: bool = False,
    user_visible_outputs: FrozenSet[str] = frozenset(),
    layout_opt: Optional[bool] = None,
    extern_node_serializer: Optional[Callable[[List[ExternKernelNode]], Any]] = None,
) -> Union[CompiledFxGraph, str]:
    """Run post-grad passes, lower *gm* and generate code for it.

    Returns a CompiledFxGraph, or (in AOT compilation) the compiled
    artifact returned by graph.compile_to_fn().
    """
    if is_tf32_warning_applicable(gm):
        _warn_tf32_disabled()

    # lift the maximum depth of the Python interpreter stack
    # to adapt large/deep models
    sys.setrecursionlimit(max(sys.getrecursionlimit(), 2000))

    _step_logger()(
        logging.INFO,
        "torchinductor compiling "
        f"{'BACKWARDS' if is_backward else 'FORWARDS'} "
        f"graph {graph_id}",
    )
    V.debug.fx_graph(gm, example_inputs)
    # TODO: Should we actually dump this? It should be redundant with the aot
    # structured logs...
    # trace_structured("inductor_input_graph", payload_fn=lambda: gm.print_readable(print_output=False))

    shape_env = _shape_env_from_inputs(example_inputs)

    # Convert view to reshape in the graph. This is necessary primarily for
    # layout optimization. Do it unconditionally for uniformity.
    #
    # It's needed because when we do layout optimization, an contiguous tensor
    # in eager mode may becomes a channels last tensor. A view op previously
    # can be applied to the contiguous tensor may not be able to be applied
    # on the channels tensor any more. An error like
    #   RuntimeError: view size is not compatible with input tensor's size and stride
    #   (at least one dimension spans across two contiguous subspaces). Use .reshape(...) instead.
    # will be printed.
    #
    # Replace view op to reshape op in this case.
    # As an example, timm_resnest/botnet26t_256/convnext_base etc. will fail if we don't do this.
    #
    # Also this has to be done before FakeTensorProp below to avoid the failed
    # .view() call.
    view_to_reshape(gm)

    # It is safe to run FakeTensorProp under no_grad because by the time
    # we're in inductor, we assume that AOTAutograd has already "taken care"
    # of autograd, so there should be no more autograd-related API's in the
    # graph.
    with torch.no_grad():
        fake_mode = fake_tensor_prop(gm, example_inputs)

    # pattern matcher passes might not preserve striding information
    # on node.meta["val"]. if in the future we rely on these being
    # correct we will need to fix.

    with V.set_fake_mode(fake_mode):
        # has some issues with memory in training
        _recursive_post_grad_passes(gm, is_inference=is_inference)
        V.debug.fx_graph_transformed(gm, example_inputs)
        post_grad_graphs_log.debug("%s", lazy_format_graph_code("AFTER POST GRAD", gm))
        trace_structured(
            "inductor_post_grad_graph",
            payload_fn=lambda: gm.print_readable(print_output=False),
        )
        optimus_scuba_log["inductor_post_grad"] = counters["inductor"]
        signpost_event(
            "optimus",
            "compile_fx.post_grad_passes",
            optimus_scuba_log,
        )

    with V.set_fake_mode(fake_mode):
        const_output_index = None
        const_graph = None
        const_code = None

        # Runtime constant folding (AOT only): split out the constant
        # subgraph, lower and codegen it separately, then feed its outputs
        # into the main graph lowering below.
        if aot_mode and config.aot_inductor.use_runtime_constant_folding:
            const_gm, const_output_index = split_const_gm(gm)

            const_graph = GraphLowering(
                const_gm,
                example_inputs=[],
                shape_env=shape_env,
                num_static_inputs=num_fixed,
                graph_id=graph_id,
                cpp_wrapper=cpp_wrapper,
                aot_mode=aot_mode,
                user_visible_outputs=user_visible_outputs,
                extern_node_serializer=extern_node_serializer,
                is_inference=is_inference,
                is_const_graph=True,
            )
            with V.set_graph_handler(const_graph):
                assert cpp_wrapper, "AOT mode only supports C++ wrapper"
                const_graph.run()

                const_code, _ = const_graph.codegen_with_cpp_wrapper()

        graph = GraphLowering(
            gm,
            # example_inputs will be used by AOTInductor to dry-run the generated code for Triton kernel tuning.
            # For the forward pass, we have the real inputs to be used as example_inputs. For the backward pass,
            # we currently use fake tensors and defake them later.
            example_inputs=example_inputs,
            shape_env=shape_env,
            num_static_inputs=num_fixed,
            graph_id=graph_id,
            cpp_wrapper=cpp_wrapper,
            aot_mode=aot_mode,
            user_visible_outputs=user_visible_outputs,
            extern_node_serializer=extern_node_serializer,
            is_inference=is_inference,
            const_output_index=const_output_index,
            const_code=const_code,
            const_module=const_graph,
        )
        with V.set_graph_handler(graph):
            graph.run(*example_inputs)
            output_strides: List[Optional[Tuple[int, ...]]] = []
            if graph.graph_outputs is not None:
                # We'll put the output strides in the compiled graph so we
                # can later return them to the caller via TracingContext
                for out in graph.graph_outputs:
                    if hasattr(out, "layout"):
                        output_strides.append(
                            tuple(
                                V.graph.sizevars.size_hint(s) for s in out.layout.stride
                            )
                        )
                    else:
                        output_strides.append(None)

            metrics_helper = metrics.CachedMetricsHelper()
            compiled_fn = graph.compile_to_fn()

            # AOT compilation returns the artifact directly (e.g. a .so path).
            if V.aot_compilation is True:
                return compiled_fn

            if cudagraphs and not V.graph.disable_cudagraphs_reason:
                from torch._inductor.cudagraph_utils import (
                    check_lowering_disable_cudagraph,
                )

                V.graph.disable_cudagraphs_reason = check_lowering_disable_cudagraph(
                    V.graph.device_node_mapping
                )

            compiled_graph = CompiledFxGraph(
                compiled_fn,
                graph,
                output_strides,
                V.graph.disable_cudagraphs_reason,
                metrics_helper.get_deltas(),
            )

    return compiled_graph
|
| 737 |
+
|
| 738 |
+
|
| 739 |
+
def clone_preserve_strides(x: torch.Tensor):
    """Clone *x* into fresh storage while keeping its exact size and stride."""
    # Number of storage elements the strided view actually touches.
    extent = 1 + sum(
        (size - 1) * stride for size, stride in zip(x.size(), x.stride())
    )
    flat_copy = torch.as_strided(x, (extent,), (1,)).clone()
    return torch.as_strided(flat_copy, x.size(), x.stride())
|
| 745 |
+
|
| 746 |
+
|
| 747 |
+
def copy_misaligned_inputs(
    new_inputs: List[torch.Tensor], check_inputs_idxs: Sequence[int]
) -> None:
    """Replace, in place, every input at *check_inputs_idxs* whose data
    pointer is not ALIGNMENT-aligned with an aligned stride-preserving clone."""
    for idx in check_inputs_idxs:
        if new_inputs[idx].data_ptr() % ALIGNMENT != 0:
            new_inputs[idx] = clone_preserve_strides(new_inputs[idx])
|
| 753 |
+
|
| 754 |
+
|
| 755 |
+
def get_input_idxs_to_check(
    inputs: Union[List[torch.Tensor], Sequence[int]],
    static_input_idxs: Sequence[int],
) -> Sequence[int]:
    """Return the indices of CUDA tensor inputs whose pointer alignment must
    be checked at call time.

    Static inputs are exempt only if their storage offset is itself aligned.
    """

    def _offset_aligned(tensor: torch.Tensor) -> bool:
        return (tensor.storage_offset() * get_dtype_size(tensor.dtype)) % ALIGNMENT == 0

    ids_to_check = []
    for idx, inp in enumerate(inputs):
        if not isinstance(inp, torch.Tensor):
            continue
        if inp.device.type != "cuda":
            continue
        if idx not in static_input_idxs or not _offset_aligned(inp):
            ids_to_check.append(idx)
    return ids_to_check
|
| 774 |
+
|
| 775 |
+
|
| 776 |
+
def align_inputs_from_check_idxs(
    model: Callable[[List[torch.Tensor]], Any], inputs_to_check: Sequence[int]
):
    """Wrap *model* so the inputs at *inputs_to_check* are realigned (cloned
    if misaligned) before every call. No-op wrapper elided when the list is
    empty."""
    if not inputs_to_check:
        return model

    def aligned_run(new_inputs):
        copy_misaligned_inputs(new_inputs, inputs_to_check)
        return model(new_inputs)

    return aligned_run
|
| 787 |
+
|
| 788 |
+
|
| 789 |
+
def align_inputs(
    model: Callable[[List[torch.Tensor]], Any],
    inputs: List[torch.Tensor],
    static_input_idxs: Sequence[int] = (),
):
    """Wrap *model* with runtime alignment checks for the non-static (or
    offset-misaligned) CUDA inputs found in *inputs*."""
    check_idxs = get_input_idxs_to_check(inputs, static_input_idxs)
    return align_inputs_from_check_idxs(model, check_idxs)
|
| 796 |
+
|
| 797 |
+
|
| 798 |
+
@dynamo_utils.dynamo_timed
def cudagraphify(
    model: torch.fx.GraphModule,
    inputs: List[torch.Tensor],
    static_input_idxs: Sequence[int] = (),
    *,
    device_index: int,
    stack_traces: List[Optional[str]],
    is_backward: bool,
    is_inference: bool,
    constants: Tuple[torch.Tensor, ...] = (),
):
    """Wrap *model* with CUDA-graph capture.

    Dispatches to the cudagraph-trees implementation when
    config.triton.cudagraph_trees is set, else the simple cudagraphify_impl
    below. If *inputs* are fake tensors, capture is deferred until the first
    call with real inputs.
    """
    from torch._inductor.cudagraph_trees import (
        cudagraphify_impl as new_cudagraphify_impl,
    )

    cudagraphify_fn: Callable[..., Any]
    if config.triton.cudagraph_trees:
        cudagraphify_fn = functools.partial(
            new_cudagraphify_impl,
            device_index=device_index,
            stack_traces=stack_traces,
            is_backward=is_backward,
            is_inference=is_inference,
            constants=constants,
        )
    else:
        cudagraphify_fn = cudagraphify_impl

    # if using fake tensors, defer cudagraphs until we get real inputs at runtime
    if not any(isinstance(inp, FakeTensor) for inp in inputs):
        return cudagraphify_fn(model, inputs, static_input_idxs)

    compiled_fn = None

    def run(new_inputs):
        # Capture lazily on first real call, then reuse the captured graph.
        nonlocal compiled_fn
        if compiled_fn is None:
            with dynamo_utils.preserve_rng_state():
                compiled_fn = cudagraphify_fn(model, new_inputs, static_input_idxs)
        return compiled_fn(new_inputs)

    return run
|
| 841 |
+
|
| 842 |
+
|
| 843 |
+
def remove_unaligned_input_idxs(
    inputs: Union[List[torch.Tensor], Sequence[int]],
    static_input_idxs: Sequence[int],
):
    """
    We require all inputs to be aligned, so introduce a copy for any
    that aren't.

    Returns the subset of *static_input_idxs* whose tensors are
    ALIGNMENT-aligned (the original sequence is returned unchanged when
    nothing was filtered out).
    """
    aligned_static_input_idxs = []
    # BUGFIX: static_input_idxs are positions into `inputs` (see
    # cudagraphify_impl, which tests `idx not in static_input_idxs` against
    # enumerate(inputs)), so we must look up inputs[idx]. The previous
    # zip(static_input_idxs, inputs) paired the k-th static index with the
    # k-th input overall, checking the wrong tensor whenever
    # static_input_idxs is not a 0-based prefix.
    for idx in static_input_idxs:
        candidate = inputs[idx]
        if isinstance(candidate, torch.Tensor) and (candidate.data_ptr() % ALIGNMENT) == 0:
            aligned_static_input_idxs.append(idx)
    if len(aligned_static_input_idxs) != len(static_input_idxs):
        return aligned_static_input_idxs
    return static_input_idxs
|
| 858 |
+
|
| 859 |
+
|
| 860 |
+
def static_input(x: torch.Tensor):
    """
    Copy and input while preserving strides
    """
    # TODO(jansel): figure out why this version doesn't work:
    # return torch.empty_strided(x.size(), x.stride(), dtype=x.dtype, device=x.device)
    # Allocate just enough flat storage to back x's strided layout.
    extent = 1 + sum(
        (size - 1) * stride for size, stride in zip(x.size(), x.stride())
    )
    storage = torch.empty(extent, dtype=x.dtype, device=x.device)
    return torch.as_strided(storage, x.size(), x.stride())
|
| 871 |
+
|
| 872 |
+
|
| 873 |
+
def index_expanded_dims_and_copy_(
    dst: torch.Tensor,
    src: torch.Tensor,
    expanded_dims: List[int],
):
    "Index into expanded dimensions of both dst and src then copy_"
    dst_view = index_expanded_dims(dst, expanded_dims)
    src_view = index_expanded_dims(src, expanded_dims)
    dst_view.copy_(src_view)
|
| 882 |
+
|
| 883 |
+
|
| 884 |
+
def cudagraphify_impl(
    model: torch.fx.GraphModule,
    inputs: List[torch.Tensor],
    static_input_idxs: Sequence[int] = (),
):
    """
    Assumes inputs[static_input_idxs[i]] are always the same memory address

    Warms the model up on a side stream, records a CUDA graph of one call,
    and returns a runner that copies fresh inputs into the recorded static
    buffers and replays the graph.
    """
    check_input_idxs = get_input_idxs_to_check(inputs, static_input_idxs)
    static_input_idxs = remove_unaligned_input_idxs(inputs, static_input_idxs)
    copy_misaligned_inputs(inputs, check_input_idxs)

    assert isinstance(inputs, list)

    # For non-static inputs, record which dims are expanded (stride-0) so only
    # the underlying data is copied later; static inputs need no copies.
    inps_expanded_dims = [
        get_expanded_dims(x) if idx not in static_input_idxs else []
        for idx, x in enumerate(inputs)
    ]

    # allocate static tensor inputs
    static_inputs = [
        x
        if not isinstance(x, torch.Tensor)
        else static_input(x)
        if idx not in static_input_idxs
        else x.detach()
        for idx, x in enumerate(inputs)
    ]

    # copy over input values for fresh allocations
    for idx, (x, expanded_dims) in enumerate(zip(inputs, inps_expanded_dims)):
        if isinstance(x, torch.Tensor) and idx not in static_input_idxs:
            index_expanded_dims_and_copy_(static_inputs[idx], x, expanded_dims)

    # warmup
    torch.cuda.synchronize()
    stream = torch.cuda.Stream()
    stream.wait_stream(torch.cuda.current_stream())
    # copy static_inputs because it will be cleared in model
    with torch.cuda.stream(stream):
        model(list(static_inputs))
    stream.synchronize()
    torch.cuda.current_stream().wait_stream(stream)
    torch.cuda.synchronize()

    # record
    graph = torch.cuda.CUDAGraph()
    with torch.cuda.graph(graph, stream=stream, capture_error_mode="thread_local"):
        static_outputs = model(list(static_inputs))
    if not isinstance(static_outputs, (list, tuple)):
        static_outputs = (static_outputs,)

    if config.size_asserts:
        # Debug variant: additionally assert static inputs kept their address.

        def run(new_inputs):
            assert len(static_inputs) == len(new_inputs)
            for idx, (dst, src, expanded_dims) in enumerate(
                zip(static_inputs, new_inputs, inps_expanded_dims)
            ):
                if not isinstance(dst, torch.Tensor):
                    pass
                elif idx in static_input_idxs:
                    assert dst.data_ptr() == src.data_ptr()
                else:
                    # TODO - could make one single op of multiple slices
                    # and avoid dispatch.
                    # Could also pre-index the `dst` tensors
                    index_expanded_dims_and_copy_(dst, src, expanded_dims)
            new_inputs.clear()
            graph.replay()
            return static_outputs

    else:
        # Fast variant: precompute which indices need a copy on each call.
        copy_indices = [
            idx for idx in range(len(static_inputs)) if idx not in static_input_idxs
        ]

        def run(new_inputs):
            for idx in copy_indices:
                expanded_dims = inps_expanded_dims[idx]
                index_expanded_dims_and_copy_(
                    static_inputs[idx], new_inputs[idx], expanded_dims
                )
            new_inputs.clear()
            graph.replay()
            return static_outputs

    return align_inputs_from_check_idxs(run, check_input_idxs)
|
| 972 |
+
|
| 973 |
+
|
| 974 |
+
def compile_fx_aot(
    model_: torch.fx.GraphModule,
    example_inputs_: List[torch.Tensor],
    inner_compile: Callable[..., Any] = compile_fx_inner,
    config_patches: Optional[Dict[str, Any]] = None,
):
    """
    AOT entry point: compile ``model_`` with ``cpp_wrapper`` forced on and
    ``aot_mode=True``, returning the path of the compiled library.
    """
    # Force cpp_wrapper on, preserving any caller-supplied patches.
    config_patches: Dict[str, Any] = (
        {"cpp_wrapper": True}
        if config_patches is None
        else {**config_patches, "cpp_wrapper": True}
    )
    # Default the output path to a hash of the generated code when neither the
    # patches nor the global config specify one.
    if (
        "aot_inductor.output_path" not in config_patches
        and not config.aot_inductor.output_path
    ):
        config_patches = {
            **config_patches,
            "aot_inductor.output_path": code_hash(model_.code),
        }

    # Not a real config option — pop it before patching config.
    extern_node_serializer = config_patches.pop("extern_node_serializer", None)
    with V.set_aot_compilation(True):
        compiled_lib_path = compile_fx(
            model_,
            example_inputs_,
            inner_compile=functools.partial(
                inner_compile,
                aot_mode=True,
                extern_node_serializer=extern_node_serializer,
            ),
            config_patches=config_patches,
        )
        assert os.path.exists(
            compiled_lib_path
        ), f"AOTInductor compiled library does not exist at {compiled_lib_path}"
        return compiled_lib_path
|
| 1010 |
+
|
| 1011 |
+
|
| 1012 |
+
# Process-wide counter handing out a unique id per compiled graph; compile_fx
# draws from it and forwards the value to inner_compile as `graph_id`.
_graph_counter = count(0)
|
| 1013 |
+
|
| 1014 |
+
|
| 1015 |
+
def fw_compiler_freezing(
    aot_autograd_model: torch.fx.GraphModule,
    aot_example_inputs: List[torch.Tensor],
    dynamo_model: torch.fx.GraphModule,
    num_example_inputs: int,
    inner_compile: Callable[..., Any],
    cudagraphs: BoxedBool,
    graph_id: int,
    forward_device: BoxedDeviceIndex,
):
    """
    Inference compiler used when config.freezing is on: folds parameters into
    the graph via `freeze`, then hands the frozen graph to `inner_compile`.
    Returns a boxed callable that drops the constant-ified params from its
    argument list before invoking the optimized function.
    """
    from torch._inductor.freezing import convert_conv_weights_to_channels_last, freeze

    # partition_fn won't be called
    _recursive_joint_graph_passes(aot_autograd_model)

    layout_opt = GraphLowering.decide_layout_opt(aot_autograd_model, is_inference=True)
    if layout_opt:
        # make sure meta['val'] is properly setup
        fake_tensor_prop(aot_autograd_model, aot_example_inputs, True)
        convert_conv_weights_to_channels_last(aot_autograd_model)

    opt_model, preserved_arg_indices = freeze(
        dynamo_model,
        aot_autograd_model,
        aot_example_inputs,  # type: ignore[arg-type]
    )

    # Keep only the inputs that survived freezing; the rest were folded in.
    aot_example_inputs = [aot_example_inputs[ind] for ind in preserved_arg_indices]
    num_fixed = len(preserved_arg_indices) - num_example_inputs

    fake_mode = detect_fake_mode(aot_example_inputs)

    # for freezing, all graph outputs should be user visible
    *_, model_outputs_node = opt_model.graph.nodes
    model_outputs = model_outputs_node.args[0]
    user_visible_outputs = [
        n.name for n in model_outputs if isinstance(n, torch.fx.Node)
    ]

    # constant params will be real tensors, not fake
    tracing_context = torch._guards.TracingContext.try_get()
    if tracing_context is not None:
        params_flat = tracing_context.params_flat
        assert params_flat is not None
        # Null out params that were folded into the graph so they can be freed.
        for i in range(len(params_flat)):
            if i not in preserved_arg_indices:
                params_flat[i] = None

    with mock.patch.object(fake_mode, "allow_non_fake_inputs", True):
        optimized_function = inner_compile(
            opt_model,
            aot_example_inputs,
            num_fixed=num_fixed,
            cudagraphs=cudagraphs,
            graph_id=graph_id,
            is_inference=True,
            boxed_forward_device_index=forward_device,
            layout_opt=layout_opt,
            user_visible_outputs=user_visible_outputs,
        )

    # aot_inductor codegens a call that takes in just the inputs, so we don't return a wrapper
    # that drops constant-ified params
    if V.aot_compilation is True:
        return optimized_function

    def wrapper(args):
        # Select surviving args and clear the caller's list (boxed convention).
        args_new = [args[i] for i in preserved_arg_indices]
        args.clear()
        return optimized_function(args_new)

    wrapper._boxed_call = True  # type: ignore[attr-defined]

    return wrapper
|
| 1089 |
+
|
| 1090 |
+
|
| 1091 |
+
@_use_lazy_graph_module(dynamo_config.use_lazy_graph_module)
def compile_fx(
    model_: torch.fx.GraphModule,
    example_inputs_: List[torch.Tensor],
    inner_compile: Callable[..., Any] = compile_fx_inner,
    config_patches: Optional[Dict[str, Any]] = None,
    decompositions: Optional[Dict[OpOverload, Callable[..., Any]]] = None,
):
    """Main entrypoint to a compile given FX graph"""
    # Peel off config patches by recursing with them applied.
    if config_patches:
        with config.patch(config_patches):
            return compile_fx(
                model_,
                example_inputs_,
                # need extra layer of patching as backwards is compiled out of scope
                inner_compile=config.patch(config_patches)(inner_compile),
                decompositions=decompositions,
            )

    # cpp_wrapper mode: recurse with cpp_wrapper=False plus the settings it
    # needs, marking inner_compile to codegen the C++ wrapper.
    if config.cpp_wrapper:
        with config.patch(
            {
                "cpp_wrapper": False,
                "triton.autotune_cublasLt": False,
                "triton.cudagraphs": False,
                "triton.store_cubin": True,
            }
        ), V.set_real_inputs(example_inputs_):
            inputs_ = example_inputs_
            if isinstance(model_, torch.fx.GraphModule):
                fake_inputs = [
                    node.meta.get("val")
                    for node in model_.graph.nodes
                    if node.op == "placeholder"
                ]
                if all(v is not None for v in fake_inputs):
                    # Validate devices before switching to fake tensors.
                    for idx, fi, i in zip(count(), fake_inputs, inputs_):
                        if fi.device != i.device:
                            raise ValueError(
                                f"Device mismatch between fake input and example input at position #{idx}: "
                                f"{fi.device} vs {i.device}. If the model was exported via torch.export(), "
                                "make sure torch.export() and torch.aot_compile() run on the same device."
                            )
                    inputs_ = fake_inputs
            return compile_fx(
                model_,
                inputs_,
                inner_compile=functools.partial(inner_compile, cpp_wrapper=True),
                decompositions=decompositions,
            )

    recursive_compile_fx = functools.partial(
        compile_fx,
        inner_compile=inner_compile,
        decompositions=decompositions,
    )

    # Normalize graphs that don't return a tuple.
    if not graph_returns_tuple(model_):
        return make_graph_return_tuple(
            model_,
            example_inputs_,
            recursive_compile_fx,
        )

    if isinstance(model_, torch.fx.GraphModule):
        if isinstance(model_.graph._codegen, _PyTreeCodeGen):
            # this graph is the result of dynamo.export()
            return handle_dynamo_export_graph(
                model_,
                example_inputs_,
                recursive_compile_fx,
            )

        model_ = _recursive_pre_grad_passes(model_, example_inputs_)
        optimus_scuba_log["inductor_pre_grad"] = counters["inductor"]
        signpost_event(
            "optimus",
            "compile_fx.pre_grad_passes",
            optimus_scuba_log,
        )

    # Normalize graphs that take nested ("bumpy") inputs.
    if any(isinstance(x, (list, tuple, dict)) for x in example_inputs_):
        return flatten_graph_inputs(
            model_,
            example_inputs_,
            recursive_compile_fx,
        )

    assert not config._raise_error_for_testing
    num_example_inputs = len(example_inputs_)
    cudagraphs = BoxedBool(config.triton.cudagraphs)
    forward_device = BoxedDeviceIndex(None)

    graph_id = next(_graph_counter)

    decompositions = (
        decompositions if decompositions is not None else select_decomp_table()
    )

    @dynamo_utils.dynamo_timed
    def fw_compiler_base(
        model: torch.fx.GraphModule,
        example_inputs: List[torch.Tensor],
        is_inference: bool,
    ):
        if is_inference:
            # partition_fn won't be called
            _recursive_joint_graph_passes(model)

        fixed = torch._inductor.utils.num_fw_fixed_arguments(
            num_example_inputs, len(example_inputs)
        )
        user_visible_outputs = set()

        if config.keep_output_stride:
            *_, model_outputs_node = model.graph.nodes
            assert model_outputs_node.op == "output"
            model_outputs = pytree.arg_tree_leaves(*model_outputs_node.args)
            num_model_outputs = len(model_outputs)

            context = torch._guards.TracingContext.try_get()
            # See Note [User Outputs in the inductor graph]
            if context is not None and context.fw_metadata and not is_inference:
                original_output_start_index = (
                    context.fw_metadata.num_mutated_inp_runtime_indices
                )
            else:
                original_output_start_index = 0

            if isinstance(model_, torch.fx.GraphModule):
                *_, orig_model_outputs_node = model_.graph.nodes
                assert orig_model_outputs_node.op == "output"
                orig_model_outputs, _ = pytree.tree_flatten(
                    orig_model_outputs_node.args
                )
                num_orig_model_outputs = len(orig_model_outputs)
            else:
                num_orig_model_outputs = num_model_outputs

            assert num_orig_model_outputs <= num_model_outputs

            # Note [User Outputs in the inductor graph]
            # We make the following assumption
            # For inference
            #   len(orig_model_outputs) == len(model_outputs)
            # For training
            #   len(orig_model_outputs) <= len(model_outputs)
            # During training, most of the time the model_outputs starts with
            # original module's outputs followed by saved activations.
            # But this can be not true if the model has inplace updated tensors.
            # AOTAutograd will make those tensors being returned before the original
            # module's output.
            # To make things safe, we'll use original_output_start_index field
            # set by AOTAutograd to decide where the original module outputs start.
            orig_output_end_idx = original_output_start_index + num_orig_model_outputs
            # Sanity check: we are about to splice out the "user" outputs from the full set
            # of "graph" outputs. Make sure we're within bounds.
            assert orig_output_end_idx <= num_model_outputs

            user_visible_outputs = {
                n.name
                for n in model_outputs[original_output_start_index:orig_output_end_idx]
                if isinstance(n, torch.fx.Node)
            }

        return inner_compile(
            model,
            example_inputs,
            num_fixed=fixed,
            cudagraphs=cudagraphs,
            graph_id=graph_id,
            is_inference=is_inference,
            boxed_forward_device_index=forward_device,
            user_visible_outputs=user_visible_outputs,
        )

    fw_compiler = functools.partial(fw_compiler_base, is_inference=False)

    if config.freezing and not torch.is_grad_enabled():
        inference_compiler = functools.partial(
            fw_compiler_freezing,
            dynamo_model=model_,
            num_example_inputs=num_example_inputs,
            inner_compile=inner_compile,
            cudagraphs=cudagraphs,
            graph_id=graph_id,
            forward_device=forward_device,
        )
    else:
        inference_compiler = functools.partial(fw_compiler_base, is_inference=True)

    def partition_fn(graph, joint_inputs, **kwargs):
        _recursive_joint_graph_passes(graph)
        return min_cut_rematerialization_partition(
            graph, joint_inputs, **kwargs, compiler="inductor"
        )

    @dynamo_utils.dynamo_timed
    @dynamo_utils.maybe_cprofile
    def bw_compiler(model: torch.fx.GraphModule, example_inputs: List[torch.Tensor]):
        fixed = count_tangents(model)
        return inner_compile(
            model,
            example_inputs,
            num_fixed=fixed,
            cudagraphs=cudagraphs,
            is_backward=True,
            graph_id=graph_id,
            boxed_forward_device_index=forward_device,
        )

    # TODO: can add logging before/after the call to create_aot_dispatcher_function
    # in torch._functorch/aot_autograd.py::aot_module_simplified::aot_function_simplified::new_func
    # once torchdynamo is merged into pytorch

    fake_mode = detect_fake_mode(example_inputs_) or torch._subclasses.FakeTensorMode(
        allow_non_fake_inputs=True
    )
    tracing_context = (
        torch._guards.TracingContext.try_get()
        or torch._guards.TracingContext(fake_mode)
    )

    if V.aot_compilation is True:
        gm, graph_signature = aot_export_module(
            model_, example_inputs_, trace_joint=False, decompositions=decompositions
        )
        unlifted_gm = _unlift_graph(model_, gm, graph_signature)
        if "dynamo_flat_name_to_original_fqn" in model_.meta:
            unlifted_gm.meta["dynamo_flat_name_to_original_fqn"] = model_.meta[
                "dynamo_flat_name_to_original_fqn"
            ]
        with V.set_fake_mode(fake_mode), compiled_autograd.disable():
            return inference_compiler(unlifted_gm, example_inputs_)

    with V.set_fake_mode(fake_mode), torch._guards.tracing(
        tracing_context
    ), compiled_autograd.disable():
        return aot_autograd(
            fw_compiler=fw_compiler,
            bw_compiler=bw_compiler,
            inference_compiler=inference_compiler,
            decompositions=decompositions,
            partition_fn=partition_fn,
            keep_inference_input_mutations=True,
        )(model_, example_inputs_)
|
| 1338 |
+
|
| 1339 |
+
|
| 1340 |
+
def _shape_env_from_inputs(inputs: List[torch.Tensor]):
|
| 1341 |
+
shape_env = None
|
| 1342 |
+
fake_mode = detect_fake_mode(inputs)
|
| 1343 |
+
|
| 1344 |
+
# TODO(voz): It would be nice to enable this assert, but there are lots of tests that
|
| 1345 |
+
# pass in real inputs for now.
|
| 1346 |
+
# if len(inputs) > 0:
|
| 1347 |
+
# assert fake_mode is not None, breakpoint()
|
| 1348 |
+
|
| 1349 |
+
if fake_mode is not None:
|
| 1350 |
+
return fake_mode.shape_env
|
| 1351 |
+
|
| 1352 |
+
# When there are no tensor inputs, get shape_env from the first SymInt.
|
| 1353 |
+
for input in inputs:
|
| 1354 |
+
if isinstance(input, torch.SymInt):
|
| 1355 |
+
return input.node.shape_env
|
| 1356 |
+
|
| 1357 |
+
# TODO(voz): Should we always have one anyway?
|
| 1358 |
+
return None
|
| 1359 |
+
|
| 1360 |
+
|
| 1361 |
+
def graph_returns_tuple(gm: torch.fx.GraphModule):
    """True if a FX graph returns a tuple"""
    if not isinstance(gm, torch.fx.GraphModule):
        # can't check this, assume true
        return True
    (result,) = output_node(gm).args
    if isinstance(result, (list, tuple)):
        return True
    if not isinstance(result, torch.fx.node.Node):
        return False
    schema = getattr(result.target, "_schema", None)
    if schema is None:
        return False
    returns = schema.returns
    # for graphs whose result is one node with multiple outputs
    return len(returns) > 1 and all(str(ret.type) == "Tensor" for ret in returns)
|
| 1377 |
+
|
| 1378 |
+
|
| 1379 |
+
def make_graph_return_tuple(
    gm: torch.fx.GraphModule,
    inputs: List[torch.Tensor],
    compile_gm: Callable[..., Any],
):
    """
    Mutate gm so it returns a tuple. This is only needed for graphs
    not created by torchdynamo that return non-tuples.
    """
    out_node = output_node(gm)
    (original_rv,) = out_node.args
    flat_rv, spec = pytree.tree_flatten(original_rv)
    # Replace the output node with one returning the flattened values.
    with gm.graph.inserting_before(out_node):
        gm.graph.output(flat_rv)
    gm.graph.erase_node(out_node)
    assert graph_returns_tuple(gm)

    compiled_fn = compile_gm(gm, inputs)

    @functools.wraps(compiled_fn)
    def wrapper(*args, **kwargs):
        # Re-nest the flat outputs into the caller's original structure.
        return pytree.tree_unflatten(compiled_fn(*args, **kwargs), spec)

    return wrapper
|
| 1403 |
+
|
| 1404 |
+
|
| 1405 |
+
def flatten_graph_inputs(gm: torch.fx.GraphModule, inputs, compile_gm):
|
| 1406 |
+
"""
|
| 1407 |
+
Mutate inputs so that they are flat and wrap gm such that it
|
| 1408 |
+
accepts those inputs. This is only needed for graphs not created
|
| 1409 |
+
by torchdynamo that take bumpy inputs.
|
| 1410 |
+
"""
|
| 1411 |
+
inputs, spec = pytree.tree_flatten(inputs)
|
| 1412 |
+
|
| 1413 |
+
class GmWrapper(torch.nn.Module):
|
| 1414 |
+
def __init__(self):
|
| 1415 |
+
super().__init__()
|
| 1416 |
+
self.gm = gm
|
| 1417 |
+
|
| 1418 |
+
def forward(self, *args):
|
| 1419 |
+
args: List[Any] = list(args)
|
| 1420 |
+
return self.gm(*pytree.tree_unflatten(args, spec))
|
| 1421 |
+
|
| 1422 |
+
compiled_fn = compile_gm(GmWrapper(), inputs)
|
| 1423 |
+
|
| 1424 |
+
@functools.wraps(compiled_fn)
|
| 1425 |
+
def wrapper(*args):
|
| 1426 |
+
# note this doesn't check the spec, assuming it is the same
|
| 1427 |
+
return compiled_fn(*pytree.arg_tree_leaves(*args))
|
| 1428 |
+
|
| 1429 |
+
return wrapper
|
| 1430 |
+
|
| 1431 |
+
|
| 1432 |
+
def handle_dynamo_export_graph(
    gm: torch.fx.GraphModule,
    inputs: List[torch.Tensor],
    compile_gm: Callable[..., Any],
):
    """
    `torch._dynamo.export` embeds pytrees in the FX graph codegen object,
    convert that to a normal FX graph so inductor can compile it.
    """
    pytree_codegen = gm.graph._codegen
    # Swap in a plain CodeGen so the compiled graph takes/returns flat values.
    gm.graph._codegen = torch.fx.graph.CodeGen()
    gm.recompile()

    compiled_fn = compile_gm(gm, pytree_codegen.process_inputs(*inputs))

    @functools.wraps(compiled_fn)
    def wrapper(*args):
        flat_args = pytree_codegen.process_inputs(*args)
        return pytree_codegen.process_outputs(compiled_fn(*flat_args))

    return wrapper
|
vila/lib/python3.10/site-packages/torch/_inductor/config.py
ADDED
|
@@ -0,0 +1,752 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os # noqa: C101
|
| 2 |
+
import sys
|
| 3 |
+
from typing import Any, Callable, Dict, Optional, TYPE_CHECKING
|
| 4 |
+
|
| 5 |
+
import torch
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
def is_fbcode():
    """Return True when running inside a Meta-internal (fbcode) build.

    OSS builds of PyTorch attach a ``git_version`` attribute to
    ``torch.version``; fbcode builds do not, so its absence is used as
    the fbcode marker.
    """
    oss_build = hasattr(torch.version, "git_version")
    return not oss_build
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
# add some debug printouts
debug = False

# add inf and NaN checkers
debug_check_inf_and_nan = False

# Whether to disable a progress bar for autotuning
disable_progress = True

# Whether to enable printing the source code for each future
verbose_progress = False

# use fx aot graph codegen cache
fx_graph_cache = os.environ.get("TORCHINDUCTOR_FX_GRAPH_CACHE") == "1"

# use cpp wrapper instead of python wrapper
cpp_wrapper = os.environ.get("TORCHINDUCTOR_CPP_WRAPPER", "0") == "1"

# codegen cpp wrapper code in an ABI compatible mode
# (defaults on in fbcode builds, off in OSS)
abi_compatible = (
    os.environ.get("TORCHINDUCTOR_ABI_COMPATIBLE", "1" if is_fbcode() else "0") == "1"
)

# version of the C shim ABI used by the cpp wrapper
c_shim_version = os.environ.get(
    "TORCHINDUCTOR_C_SHIM_VERSION", "1" if is_fbcode() else "2"
)

# dead code elimination
dce = False

# assume weight tensors are fixed size
static_weight_shapes = True

# put correctness assertions in generated code
size_asserts = os.environ.get("TORCHINDUCTOR_SIZE_ASSERTS", "1") == "1"
nan_asserts = os.environ.get("TORCHINDUCTOR_NAN_ASSERTS") == "1"

# enable loop reordering based on input orders
pick_loop_orders = True

# reuse a kernel input as the output
inplace_buffers = True

# reuse a buffer for an unrelated purpose
allow_buffer_reuse = True

# Enable pooled allocations for non-output tensors
memory_planning = os.environ.get("TORCHINDUCTOR_MEMORY_PLANNING", "0") == "1"

# How to organize memory under memory_planning=True:
# - "none": do not try to pool storage, just reuse
# - "intermediates": all non-outputs share storage, outputs each get unique storage
# - "outputs": two pools, one for intermediates (freed on return) and one for outputs
# - "combined": a single pool for both intermediates and outputs
memory_pool = os.environ.get("TORCHINDUCTOR_MEMORY_POOL", "intermediates")

# codegen benchmark harness
benchmark_harness = True

# fuse pointwise into templates
epilogue_fusion = True

# do epilogue fusions before other fusions
epilogue_fusion_first = False

# enable pattern match+replace optimizations
pattern_matcher = True

# register custom graph optimization pass hook. so far, pre/post passes are
# only applied before/after pattern_matcher in post_grad_passes.
#
# def my_custom_pre_pass(graph: torch.fx.graph.Graph):
#     # my custom graph optimization pass
#     ...
#
# def my_custom_post_pass(graph: torch.fx.graph.Graph):
#     # my custom graph optimization pass
#     ...
#
# torch._inductor.config.post_grad_custom_pre_pass = my_custom_pre_pass
# torch._inductor.config.post_grad_custom_post_pass = my_custom_post_pass
post_grad_custom_pre_pass: Optional[Callable[[torch.fx.graph.Graph], None]] = None
post_grad_custom_post_pass: Optional[Callable[[torch.fx.graph.Graph], None]] = None

# Registers a custom pregrad pass. Note that the pre-grad IR is 1.
# non-functional, 2. non-normalized, and 3. prone to change. Ideally we should
# use post-grad passes.
pre_grad_custom_pass: Optional[Callable[[torch.fx.graph.Graph], None]] = None

# Optimize away split cat patterns (Experimental)
split_cat_fx_passes = True

# Optimize conv-batchnorm if batchnorm is in eval mode. Slightly reduces numerical stability.
efficient_conv_bn_eval_fx_passes = False

# Enable predispatch aten IR for export
is_predispatch = False

# Deprecated
group_fusion = False

# Deprecated
batch_fusion = True

# Pre grad group/batch fusion and options in order, set to empty dict to disable fusion.
# Call `torch._inductor.fx_passes.group_batch_fusion.list_group_batch_fusions()` to see available fusions.
pre_grad_fusion_options: Dict[str, Dict[str, Any]] = {
    "batch_linear": {},
    "batch_linear_lhs": {},
    "batch_layernorm": {},
    "batch_tanh": {},
    "batch_relu": {},
    "batch_sigmoid": {},
}
|
| 126 |
+
|
| 127 |
+
# Post grad group/batch fusion and options, set to empty dict to disable fusion.
# Call `torch._inductor.fx_passes.group_batch_fusion.list_group_batch_fusions(False)` to see available fusions.
post_grad_fusion_options: Dict[str, Dict[str, Any]] = {}

# enable reordering pass for improving memory locality
reorder_for_locality = True

# Scale down RBLOCK for better occupancy
dynamic_scale_rblock = os.environ.get("TORCHINDUCTOR_DYNAMIC_SCALE_RBLOCK", "1") == "1"

# this forces fusion for int_mm with mul. Needed when you want to avoid realizing the int32
# but the mul gets fused with other pointwise ops instead.
force_fuse_int_mm_with_mul = False

# for pattern torch.mm(a, b.to(dtype)) with cuda tensors,
# enable torch._inductor.kernel.mm.tuned_mixed_mm fused kernel.
# Autotune will compare perf with normal cast->then->mm option
use_mixed_mm = False

# enable runtime numeric check for pre/post grad fx passes
# floating point provides limited accuracy (about 7 decimal digits for single precision
# floating point numbers, about 16 decimal digits for double precision floating point numbers)
# according to PyTorch documentation.
# https://pytorch.org/docs/stable/notes/numerical_accuracy.html#batched-computations-or-slice-computations
fx_passes_numeric_check: Dict[str, Any] = {
    "pre_grad": False,
    "precision": 1e-4,
    "num_iterations": 1,
    "requires_optimizer": True,
}

# for pattern torch.mm(a, b.to(dtype)) with cuda tensors, always use
# torch._inductor.kernel.mm.tuned_mixed_mm's fused kernel.
# Autotune will not compare with normal cast->then->mm option.
# (if force_mixed_mm is true, the use_mixed_mm flag will be ignored)
force_mixed_mm = False

# enable reordering pass for increasing overlap between compute and communication
reorder_for_compute_comm_overlap = False

# passes (in execution order) for increasing overlap between compute and communication
# for built-in passes, use string name; for user-defined passes, pass in the function handle
reorder_for_compute_comm_overlap_passes = [
    "reorder_compute_for_overlap",
    "sink_waits",
    "raise_comms",
]

# runtime estimation function for ops
# for built-in estimation function, pass in "default"; for user-defined estimation function, pass in the function handle
estimate_op_runtime = "default"

# unit: GB/s, uni-directional P2P bandwidth per card
# default value is NVLink
intra_node_bw = 300

# unit: GB/s, uni-directional P2P bandwidth per node
# default value is InfiniBand
inter_node_bw = 25

# enable slow autotuning passes to select algorithms
max_autotune = os.environ.get("TORCHINDUCTOR_MAX_AUTOTUNE") == "1"

# enable slow autotuning passes to select pointwise/reductions algorithms
max_autotune_pointwise = os.environ.get("TORCHINDUCTOR_MAX_AUTOTUNE_POINTWISE") == "1"

# enable slow autotuning passes to select gemm algorithms
max_autotune_gemm = os.environ.get("TORCHINDUCTOR_MAX_AUTOTUNE_GEMM") == "1"

# enable autotune local cache
use_autotune_local_cache = True

# enable autotune remote cache
# NOTE(review): env var is spelled TORCH_INDUCTOR_... unlike the
# TORCHINDUCTOR_... convention used elsewhere — confirm before renaming,
# as the name is part of the user-facing contract.
use_autotune_remote_cache = (
    os.environ.get("TORCH_INDUCTOR_AUTOTUNE_REMOTE_CACHE") == "1"
)

# force cublas and triton to use the same precision; cublas supports TF32 for matmul operations
# when m, n, k are multiples of 16, 16, 8, whereas triton supports TF32 for matmul operations
# for any combinations of m, n, k, regardless of their alignment. setting this flag will ensure
# that triton does not use TF32 wherever cublas would not use TF32
force_same_precision = (
    True if is_fbcode() else os.environ.get("TORCHINDUCTOR_FORCE_SAME_PRECISION") == "1"
)
# Specify candidate backends for gemm autotune.
# Possible choices are combinations of: ATen, Triton, CUTLASS.
# ATen: default Pytorch ATen kernels.
# Triton: Triton templates defined in torch inductor.
# CUTLASS: Cutlass templates and kernels.
max_autotune_gemm_backends = os.environ.get(
    "TORCHINDUCTOR_MAX_AUTOTUNE_GEMM_BACKENDS", "ATEN,TRITON"
).upper()

# the value used as a fallback for the unbacked SymInts
# that can appear in the input shapes (e.g., in autotuning)
unbacked_symint_fallback = 8192

# enable searching global and local cache regardless of `max_autotune`
search_autotune_cache = os.environ.get("TORCHINDUCTOR_SEARCH_AUTOTUNE_CACHE") == "1"

# save autotuning arguments for debugging/replay
save_args = os.environ.get("TORCHINDUCTOR_SAVE_ARGS") == "1"

# We will disable creating subprocess for autotuning if this is False
autotune_in_subproc = os.environ.get("TORCHINDUCTOR_AUTOTUNE_IN_SUBPROC") == "1"

# If autotuning in subprocess, whether to use multiple devices
autotune_multi_device = os.environ.get("TORCHINDUCTOR_AUTOTUNE_MULTI_DEVICE") == "1"

# coordinate-descent style refinement of autotuned kernel configs
coordinate_descent_tuning = (
    os.environ.get("TORCHINDUCTOR_COORDINATE_DESCENT_TUNING") == "1"
)
coordinate_descent_check_all_directions = (
    os.environ.get("TORCHINDUCTOR_COORDINATE_DESCENT_CHECK_ALL_DIRECTIONS") == "1"
)
coordinate_descent_search_radius = int(
    os.environ.get("TORCHINDUCTOR_COORDINATE_DESCENT_RADIUS", "1")
)

# Disabled by default on ROCm, opt-in if model utilises NHWC convolutions
layout_opt_default = "1" if not torch.version.hip else "0"
layout_optimization = (
    os.environ.get("TORCHINDUCTOR_LAYOUT_OPTIMIZATION", layout_opt_default) == "1"
)

# force layout optimization even where the heuristic would skip it
force_layout_optimization = os.environ.get("TORCHINDUCTOR_FORCE_LAYOUT_OPT", "0") == "1"


# Whether to keep the output strides the same as eager after layout optimization.
keep_output_stride = os.environ.get("TORCHINDUCTOR_KEEP_OUTPUT_STRIDE", "1") == "1"

# Enabling this will let compiler print warning messages if a generated triton
# kernel has inputs with mixed layouts. This is helpful for perf debugging
# since a kernel with mixed layout inputs may run much slower than one whose inputs
# have uniform layouts.
warn_mix_layout = os.environ.get("TORCHINDUCTOR_WARN_MIX_LAYOUT") == "1"

# control store vs recompute heuristic
# For fanouts, rematerialization can lead to exponential blowup. So, have
# smaller threshold
realize_reads_threshold = 4
realize_opcount_threshold = 30

# Threshold to prevent excessive accumulation of ops in one buffer during lowering
realize_acc_reads_threshold = 8

# fallback to eager for random/dropout, this is slow but useful for debugging
fallback_random = False

# automatically create fallbacks when encountering an unhandled op
implicit_fallbacks = True

# fuse even in cases without common reads
aggressive_fusion = False

# For each fused kernel in the wrapper, comment with the nodes that get fused.
# Useful for debugging fusion.
debug_fusion = os.environ.get("TORCHINDUCTOR_DEBUG_FUSION") == "1"
benchmark_fusion = os.environ.get("TORCHINDUCTOR_BENCHMARK_FUSION") == "1"
enabled_metric_tables = os.environ.get("TORCHINDUCTOR_ENABLED_METRIC_TABLES", "")

# how many nodes to allow into a single fusion
max_fusion_size = 64

# max number of inputs to generate cat as a pointwise op with masked loads
max_pointwise_cat_inputs = 8

# replace small reductions with pointwise, disable with `= 1`
unroll_reductions_threshold = 8

# Add extra comments to output code (causes compile cache misses)
comment_origin = False

# Convert 1x1 convs into matmuls
conv_1x1_as_mm = False

# Enable split reductions for better utilization when the dimension
# being reduced over is large (by splitting it)
split_reductions = True

# emit a benchmark harness alongside each generated kernel
benchmark_kernel = os.environ.get("TORCHINDUCTOR_BENCHMARK_KERNEL", "0") == "1"

# Enable constant and index_expr folding
constant_and_index_propagation = True

# we always add constants into graph.constants without
# performing any constant-inlining optimization
always_keep_tensor_constants = False

# assert that indirect indexing does not read / write out of bounds
assert_indirect_indexing = True

# constant folding on the joint graph
joint_graph_constant_folding = True

# Enable indirect_indexing asserts for decompositions and lowerings
debug_index_asserts = False

# warnings intended for PyTorch developers, disable for point releases
is_nightly_or_source = "dev" in torch.__version__ or "git" in torch.__version__
developer_warnings = is_fbcode() or is_nightly_or_source

# The multiprocessing start method to use for inductor workers in the codecache.
# TODO: fork is not safe in a multithreaded environment, we should evaluate changing
# the default to spawn.
worker_start_method = "fork"
|
| 332 |
+
|
| 333 |
+
|
| 334 |
+
def decide_compile_threads():
    """Choose how many worker threads to use for async compilation.

    Precedence:
    1. The TORCHINDUCTOR_COMPILE_THREADS env var always wins (set it to 1
       to disable async compiling, e.g. to keep pdb usable).
    2. win32 platforms and fbcode builds are pinned to a single thread.
    3. Otherwise, the number of usable CPU cores, capped at 32.
    """
    env_override = os.environ.get("TORCHINDUCTOR_COMPILE_THREADS")
    if env_override is not None:
        return int(env_override)
    if sys.platform == "win32" or is_fbcode():
        return 1
    # Respect CPU affinity masks (e.g. in containers) where the platform
    # exposes them; fall back to the raw core count otherwise.
    if hasattr(os, "sched_getaffinity"):
        available = len(os.sched_getaffinity(0))
    else:
        available = os.cpu_count()
    assert available
    return min(32, available)
|
| 354 |
+
|
| 355 |
+
|
| 356 |
+
# number of worker threads for async compilation (see decide_compile_threads)
compile_threads = decide_compile_threads()

# gemm autotuning global cache dir
if is_fbcode():
    from libfb.py import parutil

    try:
        if __package__:
            global_cache_dir = parutil.get_dir_path(
                os.path.join(__package__.replace(".", os.sep), "fb/cache")
            )
        else:
            global_cache_dir = parutil.get_dir_path("fb/cache")
    except ValueError:
        global_cache_dir = None
else:
    global_cache_dir = None

# If kernel is fused, the name is generated from the origin node op names
# for larger kernels limit this
kernel_name_max_ops = 10

# Pad input tensors of matmul/bmm/addmm to leverage Tensor Cores in NVIDIA GPUs
shape_padding = os.environ.get("TORCHINDUCTOR_SHAPE_PADDING", "1") == "1"

# Fx-based linear/matmul/bmm + permute/transpose vertical fusion
permute_fusion = os.environ.get("TORCHINDUCTOR_PERMUTE_FUSION", "0") == "1"

# Mark the wrapper call in PyTorch profiler
profiler_mark_wrapper_call = False

# Generate hook calls to torch._inductor.hooks.run_intermediate_hooks for
# every intermediate for which we can correlate it with an intermediate
# from the original FX graph
generate_intermediate_hooks = False

# Populate traceback field on IRNode; good for debugging why origin_node is
# not populated, or finding out where an IRNode was constructed
debug_ir_traceback = False

# used for debugging to make sure config is properly set
_raise_error_for_testing = False

# TORCHINDUCTOR_PROFILE="" disables bandwidth profiling; "1" profiles every
# kernel; any other value is treated as a regex selecting kernels to profile.
_profile_var = os.environ.get("TORCHINDUCTOR_PROFILE", "")
profile_bandwidth = _profile_var != ""
profile_bandwidth_regex = "" if _profile_var == "1" else _profile_var
# Specify a file where we print out the profiling results.
# None means we do not dump results to a file.
profile_bandwidth_output = os.environ.get("TORCHINDUCTOR_PROFILE_OUTPUT", None)

# TODO: remove later
disable_cpp_codegen = False


# Freezing will attempt to inline weights as constants in optimization
# and run constant folding and other optimizations on them. After freezing, weights
# can no longer be updated.
freezing: bool = os.environ.get("TORCHINDUCTOR_FREEZING", "0") == "1"

# Make freezing invalidate the eager Parameters of nn modules, to avoid memory overhead
# of potentially keeping multiple copies of weights.
freezing_discard_parameters: bool = False

# Kill switch for allowing temporary tensors to be allocated as stack arrays. Tests
# should be run with this flag both on and off to make sure we have coverage.
allow_stack_allocation: bool = (
    os.environ.get("TORCHINDUCTOR_STACK_ALLOCATION", "1") == "1"
)

# Enables an alternate DSO interface (the "minimal ArrayRef interface") intended
# to maximize performance for use cases that it can accommodate at the expense of
# generality. In brief:
# - inputs and outputs are ArrayRefTensor<T> (note that strides are required, but the
#   tensor must be contiguous)
# - constant handling is unchanged because it is not a per-inference-iteration bottleneck
#
# When the DSO is generated in this mode, the usual interface will also be supported,
# but performance for that interface may be degraded.
use_minimal_arrayref_interface: bool = False

# decompose some memory bound matmul/bmm to mul
decompose_mem_bound_mm: bool = False
|
| 438 |
+
|
| 439 |
+
|
| 440 |
+
# config specific to codegen/cpp.py
class cpp:
    # set to torch.get_num_threads()
    threads = -1

    # Do not generate loops when the condition doesn't hold, like:
    # for(long i0=4096; i0<4096; i0+=1)
    no_redundant_loops = True

    # Assume number of threads is dynamic, don't specialize thread number.
    # Kernels don't recompile on thread number changes with this flag on.
    # For single-threaded workload, turning it on would incur a slight
    # performance degradation.
    dynamic_threads = False

    # SIMD vector width override; None means autodetect
    simdlen: Optional[int] = None
    # minimum number of elements per thread before parallelizing a loop
    min_chunk_size = 4096
    # candidate C++ compilers, tried in order; None triggers the
    # conda-forge gcc12 download path when conda is installed
    cxx = (
        None,  # download gcc12 from conda-forge if conda is installed
        # "g++-12",
        # "g++-11",
        # "g++-10",
        # "clang++",
        os.environ.get("CXX", "clang++" if sys.platform == "darwin" else "g++"),
        # "g++.par",
    )
    # Allow kernel performance profiling via PyTorch profiler
    enable_kernel_profile = False

    # enable weight prepacking to get a better performance; may lead to large memory footprint
    weight_prepack = True

    # Inject a bug into our relu implementation; useful for testing our repro
    # extraction and minification functionality.
    # Valid values: "compile_error", "runtime_error", "accuracy"
    inject_relu_bug_TESTING_ONLY: Optional[str] = None
    inject_log1p_bug_TESTING_ONLY: Optional[str] = None

    # If None, autodetect whether or not AVX512/AVX2 can be used. Otherwise,
    # force usage as specified, without testing.
    vec_isa_ok: Optional[bool] = None

    # similar to config.triton.descriptive_names
    descriptive_names = "original_aten"

    # how many nodes to allow into a single horizontal fusion
    max_horizontal_fusion_size = 16

    # Make scatter_reduce fallback when reduce is sum to avoid performance regression
    # using atomic_add.
    fallback_scatter_reduce_sum = True

    # Use -funsafe-math-optimizations when compiling
    enable_unsafe_math_opt_flag = False

    # Use -ffp-contract when compiling
    enable_floating_point_contract_flag = False
|
| 497 |
+
|
| 498 |
+
|
| 499 |
+
# config specific to codegen/triton.py
class triton:
    # Use cudagraphs on output code
    cudagraphs = False

    # Use cudagraph trees for memory pooling if `cudagraphs` is True
    cudagraph_trees = True

    # assertions not on the fast path, steady state
    slow_path_cudagraph_asserts = True

    # TODO - need to debug why this prevents cleanup
    cudagraph_trees_history_recording = False

    # assertions on the fast path
    fast_path_cudagraph_asserts = False

    # skip warmup for cudagraph trees
    skip_cudagraph_warmup = False

    # Synchronize before and after every compiled graph.
    debug_sync_graph = False

    # Synchronize after every kernel launch, to help pinpoint bugs
    debug_sync_kernel = False

    # Always load full blocks (rather than broadcasting inside the block)
    dense_indexing = False

    # limit tiling dimensions
    max_tiles = 2

    # use triton.autotune for pointwise ops with complex layouts
    # this should only be disabled for debugging/testing
    autotune_pointwise = True

    # max autotune gemm with cublasLt
    autotune_cublasLt = True

    # should we stop a fusion to allow better tiling?
    tiling_prevents_pointwise_fusion = True
    tiling_prevents_reduction_fusion = True

    # should we give different names to kernels
    # Note: This is orthogonal to descriptive_names - this is deciding whether
    # our triton kernel names should all be `triton_` (to maximize caching) or
    # whether they should be unique.
    unique_kernel_names = os.environ.get("TORCHINDUCTOR_UNIQUE_KERNEL_NAMES") == "1"

    # should we put op names in kernel names
    # False: No special names (just triton__1, triton__2, etc.)
    # "torch": Maps to the fx op in the Dynamo graph (module name, method name, etc.)
    # "original_aten": Maps to the highest-level aten op (i.e. pre-decompositions)
    # "inductor_node": Maps to the node name in the FX graph passed to Inductor
    descriptive_names = "original_aten"

    # use alternate codegen for smaller reductions
    persistent_reductions = (
        os.environ.get("TORCHINDUCTOR_PERSISTENT_REDUCTIONS", "1") == "1"
    )

    # 0/False: disable
    # 1/True: enable, use tuning to pick between different subkernels
    # 2: enable, force using persistent reduction (for debugging)
    # 3: enable, force using non-persistent reduction (for debugging)
    multi_kernel = int(os.environ.get("TORCHINDUCTOR_MULTI_KERNEL", "0"))

    # hint to Triton when arguments are divisible by 16
    divisible_by_16 = True

    # these are not enforced, but they are used by asserts in triton_heuristics.py
    # NOTE: mobilevit_s in timm_models required X to be set to the higher value 2048

    # Max RBLOCK will be large for multi-kernel since we do more aggressive
    # persistent reduction.
    max_block = {
        "X": 2048,
        "Y": 1024,
        "Z": 1024,
        "R": 4096 * (16 if multi_kernel else 1),
    }

    # Minimum RBLOCK to be used for a TritonSplitScanKernel
    # NOTE: This also indirectly controls the size of workspace buffer required
    min_split_scan_rblock = 256

    # Store the generated cubin files for cpp wrapper code to load
    store_cubin = False

    # the max number of spills we allow for the configs we benchmark.
    # Setting this to 0 means we skip a config if it spills even a single
    # register.
    # Setting it to a larger value allows a config spilling a small amount
    # of registers being benchmarked.
    #
    # NOTE: triton will always report >0 register spills for kernels using sin/cos.
    # (check this issue https://github.com/openai/triton/issues/1756 )
    # So far we see a fixed 8 spilled registers for kernels using sin/cos.
    # Raise the threshold to 16 to be safe.
    # We should revisit this once we understand more of the source of register spills.
    spill_threshold: int = 16

    # Generate code containing the newer tl.make_block_ptr() API for loads/store
    use_block_ptr = False

    # Inject a bug into our relu implementation; useful for testing our repro
    # extraction and minification functionality.
    # Valid values: "compile_error", "runtime_error", "accuracy"
    inject_relu_bug_TESTING_ONLY: Optional[str] = None
|
| 608 |
+
|
| 609 |
+
|
| 610 |
+
# config specific to AOTInductor (ahead-of-time compiled DSOs)
class aot_inductor:
    # AOTInductor output path
    # If an absolute path is specified, the generated lib files will be stored under the directory;
    # If a relative path is specified, it will be used as a subdirectory under the default caching path;
    # If not specified, a temp directory will be created under the default caching path.
    # If the specified path contains something like "model.so", the sub-string will be used
    # to name the generated library.
    output_path = ""

    # keep debug symbols / artifacts when compiling the generated library
    debug_compile = os.environ.get("AOT_INDUCTOR_DEBUG_COMPILE", "0") == "1"

    # Serialized tree spec for flattening inputs
    serialized_in_spec = ""

    # Serialized tree spec for flattening outputs
    serialized_out_spec = ""

    # flag to decide whether to create a submodule for constant graph.
    use_runtime_constant_folding: bool = False
|
| 629 |
+
|
| 630 |
+
|
| 631 |
+
# config specific to CUDA template kernel compilation (CUTLASS backend)
class cuda:
    # CUDA arch to use for CUDA template kernel compilation.
    # e.g. "70", "75", "80", "90", etc.
    # When arch is None, Inductor uses torch.cuda.get_device_capability(0).
    arch: Optional[str] = None

    # CUDA version to use for CUDA template kernel compilation.
    # e.g. "11.4", "12.1", etc.
    # When version is None, Inductor uses torch.version.cuda.
    version: Optional[str] = None

    # Optimization level for the host compiler.
    compile_opt_level = "-O1"

    # Whether to enable device LTO (link-time-optimization).
    enable_cuda_lto = False

    # Whether to keep intermediate files during compilation.
    # NOTE(review): comment and flag name disagree (ptxas info vs keeping
    # intermediates) — confirm intended semantics against the codegen that
    # consumes this flag.
    enable_ptxas_info = False

    # Whether to enable debug info, e.g. line number, cutlass debug info.
    enable_debug_info = False

    # Whether to use fast math.
    use_fast_math = False

    # Path to the CUTLASS repo root directory.
    # The default path only works under PyTorch local development environment.
    cutlass_dir = os.environ.get(
        "TORCHINDUCTOR_CUTLASS_DIR",
        os.path.abspath(
            os.path.join(os.path.dirname(torch.__file__), "../third_party/cutlass/")
        ),
    )

    # Configures the maximum number of CUTLASS configs to profile in max_autotune.
    # By default it's None, so that all CUTLASS configs are tuned.
    # This is mainly used to reduce test time in CI.
    cutlass_max_profiling_configs: Optional[int] = None

    # Path to CUDA NVCC.
    # NVCC search order:
    # 1) cuda_cxx set in this config
    # 2) CUDACXX environment variable
    # 3) CUDA_HOME environment variable
    # 4) default system search PATH.
    cuda_cxx: Optional[str] = None

    # If set to True, it will ensure that only GEMM ops capable of
    # epilogue fusion via CUTLASS Epilogue Visitor Trees ( EVT )
    # are enabled for the CUTLASS backend.
    cutlass_only_evt_capable_ops: bool = False
|
| 683 |
+
|
| 684 |
+
|
| 685 |
+
# create a directory containing lots of debug information
class trace:
    # master switch for all debugging flags below
    enabled = os.environ.get("TORCH_COMPILE_DEBUG", "0") == "1"

    # Save debug information to a temporary directory
    # If not specified, a temp directory will be created by system
    debug_dir: Optional[str] = None

    # Save python logger call >=logging.DEBUG
    debug_log = False

    # Save python logger call >=logging.INFO
    info_log = False

    # Save input FX graph (post decomps, pre optimization)
    fx_graph = True

    # Save FX graph after transformations
    fx_graph_transformed = True

    # Save TorchInductor IR before fusion pass
    ir_pre_fusion = True

    # Save TorchInductor IR after fusion pass
    ir_post_fusion = True

    # Copy generated code to trace dir
    output_code = True

    # SVG figure showing post-fusion graph
    graph_diagram = os.environ.get("INDUCTOR_POST_FUSION_SVG", "0") == "1"

    # SVG figure showing fx with fusion
    draw_orig_fx_graph = os.environ.get("INDUCTOR_ORIG_FX_SVG", "0") == "1"

    # We draw our fx graphs with the "record" shape attribute by default.
    # Sometimes, when the graph is very complex, we may hit dot errors like below:
    # "flat edge between adjacent nodes one of which has a record shape -
    # replace records with HTML-like labels"
    # and thus fail to generate a graph. So, let's give the user an option
    # to specify the shape attribute for the dot graph. For example, passing
    # INDUCTOR_DOT_GRAPH_SHAPE_SVG = "none" would let us generate HTML-like labels
    # to work around the above failure.
    dot_graph_shape = os.environ.get("INDUCTOR_DOT_GRAPH_SHAPE_SVG", None)

    # Store cProfile (see snakeviz to view)
    compile_profile = False

    # Upload the .tar.gz file
    # Needs to be overridden based on specific environment needs
    upload_tar: Optional[Callable[[str], None]] = None

    log_autotuning_results: bool = False
|
| 739 |
+
|
| 740 |
+
|
| 741 |
+
# Config entries excluded when pickling/saving this config module.
_save_config_ignore = {
    # workaround: "Can't pickle <function ...>"
    "trace.upload_tar",
}

if TYPE_CHECKING:
    from torch.utils._config_typing import *  # noqa: F401, F403

from torch.utils._config_module import install_config_module

# adds patch, save_config, etc
install_config_module(sys.modules[__name__])
|
vila/lib/python3.10/site-packages/torch/_inductor/constant_folding.py
ADDED
|
@@ -0,0 +1,264 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import collections
|
| 2 |
+
from typing import Any, Callable, Dict, Optional
|
| 3 |
+
|
| 4 |
+
import torch
|
| 5 |
+
import torch.utils._pytree as pytree
|
| 6 |
+
|
| 7 |
+
aten = torch.ops.aten

# We would like to split modules into two subgraphs for runtime weight updates to work correctly.
# The use case and more information could be found at:
# https://docs.google.com/document/d/1inZC-8KarJ6gKB7G9egmYLx1V_dKX_apxon0w4zPC0Q/edit?usp=sharing
# node.meta key under which the subgraph tag below is stored
META_TAG = "MODULE_TYPE"
# tag for nodes that stay in the main (runtime) module
MODULE_TAG = "_MAIN_MODULE"
# tag for nodes that belong to the constant-foldable submodule
CONST_MODULE_TAG = "_CONST_MODULE"
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
def replace_node_with_constant(gm, node, constant, name=None):
|
| 18 |
+
g = gm.graph
|
| 19 |
+
|
| 20 |
+
if name:
|
| 21 |
+
qualname = name
|
| 22 |
+
else:
|
| 23 |
+
if not hasattr(gm, "_frozen_param_count"):
|
| 24 |
+
gm._frozen_param_count = 0
|
| 25 |
+
i = gm._frozen_param_count
|
| 26 |
+
|
| 27 |
+
while True:
|
| 28 |
+
qualname = f"_frozen_param{i}"
|
| 29 |
+
if not hasattr(gm, qualname):
|
| 30 |
+
break
|
| 31 |
+
i += 1
|
| 32 |
+
|
| 33 |
+
gm._frozen_param_count = i + 1
|
| 34 |
+
|
| 35 |
+
with g.inserting_before(node):
|
| 36 |
+
new_input_node = g.create_node("get_attr", qualname, (), {})
|
| 37 |
+
node.replace_all_uses_with(new_input_node)
|
| 38 |
+
new_input_node.meta.update(node.meta)
|
| 39 |
+
g.erase_node(node)
|
| 40 |
+
|
| 41 |
+
# needed to suppress `does not reference an nn.Module, nn.Parameter, or buffer` warning
|
| 42 |
+
gm.register_buffer(qualname, constant)
|
| 43 |
+
setattr(gm, qualname, constant)
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
class ConstantFolder(torch.fx.Interpreter):
    """Interpreter that discovers constant-foldable nodes in a GraphModule.

    Placeholders are seeded with an ``unknown_value`` sentinel; any node whose
    inputs include the sentinel is itself unknown. Nodes that evaluate to a
    tensor without depending on any placeholder are recorded in
    ``node_replacements`` as folding candidates. Subclasses may override
    ``insertable_tensor_check`` / ``add_node_replacement`` to customize policy.
    """

    def __init__(
        self,
        gm,
        skip_constructors=False,
    ):
        super().__init__(gm)
        # node -> the constant tensor it evaluated to (folding candidates)
        self.node_replacements: Dict[torch.fx.Node, Any] = {}
        # node -> count of its uses that have been consumed by folded users
        self.replaced_uses: Dict[torch.fx.Node, int] = collections.Counter()
        # sentinel propagated through the env for non-foldable values
        self.unknown_value = object()
        # when True, constructor-like ops (no tensor inputs) are not folded
        self.skip_constructors: bool = skip_constructors

        # overwrite this to deallocate env values if their only remaining use
        # is the output
        self.user_to_last_uses = self.node_to_last_non_output_use()

    def is_impure(self, node: torch.fx.node.Node):
        # Nodes we refuse to fold even when they are computable at fold time.
        if node.target in [
            torch.ops.quantized_decomposed.dequantize_per_channel.default,
            torch.ops.quantized_decomposed.dequantize_per_tensor.default,
            torch.ops.quantized_decomposed.dequantize_per_tensor.tensor,
        ]:
            # For the pattern fp32_weight -> q -> dq
            # We only folding fp32_weight -> q
            # int8_weight and leave dq in graph to be fused
            return True
        return False

    def node_to_last_non_output_use(self):
        """Map each node to the env entries whose last non-output use is at that node."""
        last_non_output_use = collections.defaultdict(list)
        seen_uses = set()
        # the output node is always last in the graph's node list
        output_node = next(iter(reversed(self.module.graph.nodes)))

        # Walk backwards: the first time we see a node used, that user is its
        # last use in forward execution order.
        for node in reversed(self.module.graph.nodes):
            if node.target == "output":
                continue

            def add_use(inp):
                if inp in seen_uses:
                    return

                seen_uses.add(inp)
                last_non_output_use[node].append(inp)

            pytree.tree_map_only(torch.fx.Node, add_use, (node.args, node.kwargs))

            # if this node is only used in output, we want to gc it right away
            if len(node.users) == 1 and output_node in node.users:
                last_non_output_use[node].append(node)

        return last_non_output_use

    def run_node(self, node):
        """Evaluate one node, returning ``unknown_value`` for non-foldable cases."""
        if node.target == "output":
            # because we remove nodes from env on last non output use,
            # re-define them now or we'll get error in interpreter
            def set_env(arg):
                self.env[arg] = self.unknown_value

            pytree.tree_map_only(torch.fx.Node, set_env, node.args)
            return super().run_node(node)

        args, kwargs = self.fetch_args_kwargs_from_env(node)
        flattened_inputs = pytree.arg_tree_leaves(*args, **kwargs)

        # Any unknown input makes the result unknown.
        if self.unknown_value in flattened_inputs:
            return self.unknown_value

        # TODO - fix errors with this
        if (
            node.op == "call_function"
            and node.target == aten._efficientzerotensor.default
        ):
            return self.unknown_value

        # TODO - constant folding triton kernel returns the inputs -- fix this
        if (
            node.op == "call_function"
            and node.name == "triton_kernel_wrapper_functional_proxy"
        ):
            return self.unknown_value

        # skip constructors, since inductor generates optimal code for them already
        # and turning into tensor would result in an additional global memory read
        # TODO - more complicated strategy
        if (
            self.skip_constructors
            and node.op != "get_attr"
            and not any(isinstance(e, torch.Tensor) for e in flattened_inputs)
        ):
            return self.unknown_value

        # All mutations should either be removed or on inputs which we did not make constant
        if (
            isinstance(node.target, torch._ops.OpOverload)
            and torch.Tag.nondeterministic_seeded in node.target.tags
        ):
            return self.unknown_value

        out = super().run_node(node)

        if node.op != "get_attr" and isinstance(out, torch.Tensor):
            if not self.insertable_tensor_check(out):
                return out

            if self.is_impure(node):
                return self.unknown_value

            self.add_node_replacement(node, out)

            flattened_node_inps = pytree.arg_tree_leaves(*node.args, **node.kwargs)

            # Track which inputs are consumed only by folded nodes ...
            for n in flattened_node_inps:
                if not isinstance(n, torch.fx.Node):
                    continue

                self.replaced_uses[n] += 1

            # ... and drop candidates whose every use has been folded away,
            # since they would only add dead constants to the graph.
            for to_delete in self.user_to_last_uses.get(node, []):
                if self.replaced_uses[to_delete] == len(to_delete.users):
                    self.node_replacements.pop(to_delete, None)

        return out

    def insertable_tensor_check(self, tensor: torch.Tensor) -> bool:
        # Hook for subclasses to veto folding of particular tensors.
        return True

    def add_node_replacement(self, node: torch.fx.Node, tensor: torch.Tensor) -> None:
        # Hook for subclasses to customize how a replacement is recorded.
        self.node_replacements[node] = tensor

    def run(self):
        """Run the interpreter with every placeholder seeded as unknown."""
        env = {}
        for n in self.module.graph.nodes:
            if n.op == "placeholder":
                env[n] = self.unknown_value
        return super().run(initial_env=env)
|
| 182 |
+
|
| 183 |
+
|
| 184 |
+
@torch.utils._python_dispatch._disable_current_modes()
def constant_fold(gm, constraint_fn: Optional[Callable[[torch.fx.Node], bool]] = None):
    """Fold constant-computable nodes of ``gm`` into ``get_attr`` constants, in place.

    ``constraint_fn``, when provided, is a per-node predicate: only nodes for
    which it returns True are folded. Unused ``get_attr`` nodes (and their module
    attributes) left behind by folding are removed, and ``gm`` is recompiled.
    """
    folder = ConstantFolder(gm, skip_constructors=True)
    folder.run()

    for node, value in folder.node_replacements.items():
        if constraint_fn is None or constraint_fn(node):
            replace_node_with_constant(gm, node, value)

    # Collect first, then mutate: erasing while iterating the graph is unsafe.
    dead_attr_nodes = [
        node
        for node in gm.graph.nodes
        if node.op == "get_attr" and len(node.users) == 0
    ]
    for node in dead_attr_nodes:
        if hasattr(gm, node.target):
            delattr(gm, node.target)
        gm.graph.erase_node(node)

    gm.graph.eliminate_dead_code()
    gm.graph.lint()
    gm.recompile()
|
| 207 |
+
|
| 208 |
+
|
| 209 |
+
@torch.utils._python_dispatch._disable_current_modes()
def constant_graph_tag(gm: torch.fx.GraphModule):
    """Tag every node's meta[META_TAG] as CONST_MODULE_TAG or MODULE_TAG.

    A node is tagged constant when it is a ``get_attr``, a folding candidate
    found by ``ConstantFolder``, or an input consumed by one.
    """
    folder = ConstantFolder(gm, skip_constructors=True)
    folder.run()

    for node in gm.graph.nodes:
        is_const = (
            node.op == "get_attr"
            or node in folder.node_replacements
            or node in folder.replaced_uses
        )
        node.meta[META_TAG] = CONST_MODULE_TAG if is_const else MODULE_TAG
|
| 223 |
+
|
| 224 |
+
|
| 225 |
+
def run_and_get_constant_graph(gm: torch.fx.GraphModule) -> torch.fx.GraphModule:
    """
    Construct a GraphModule which corresponds to the part which could be
    constant folded in provided gm.

    Constant-tagged nodes are copied into a fresh graph; every copied node
    that also feeds a node remaining in the main module becomes an output of
    the returned GraphModule.
    """

    constant_graph_tag(gm)

    # We rewrite the tags, if it's a constant being directly consumed, without
    # any folding opportunity, we keep it in main gm.
    for node in gm.graph.nodes:
        if node.op != "get_attr":
            continue
        if not any(u.meta[META_TAG] == CONST_MODULE_TAG for u in node.users):
            node.meta[META_TAG] = MODULE_TAG

    folded_graph = torch.fx.Graph()

    remapping: Dict[torch.fx.Node, torch.fx.Node] = {}
    boundary_outputs = []
    for node in gm.graph.nodes:
        if node.meta[META_TAG] == MODULE_TAG:
            continue

        copied = folded_graph.node_copy(node, lambda x: remapping[x])
        remapping[node] = copied

        # Values consumed by the main module must cross the boundary as outputs.
        if any(user.meta[META_TAG] == MODULE_TAG for user in node.users):
            boundary_outputs.append(copied)

    folded_graph.output(tuple(boundary_outputs))
    folded_graph.lint()
    return torch.fx.GraphModule(gm, folded_graph)
|
vila/lib/python3.10/site-packages/torch/_inductor/cudagraph_trees.py
ADDED
|
@@ -0,0 +1,2159 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
CUDA graph trees are a safety abstraction over CUDAGraphs, similar to make_graph_callables,
|
| 3 |
+
which share the same memory pool. Sharing a memory pool is an extremely
|
| 4 |
+
important optimization when chaining multiple CUDA graphs together, as it
|
| 5 |
+
prevents you from needing to copy intermediate tensors from one graph to the
|
| 6 |
+
next, and reduces overall memory usage by allowing dead memory from the first
|
| 7 |
+
pool to be reused in the second.
|
| 8 |
+
|
| 9 |
+
The standard graph/make_graph_callables support sharing memory pool, but
|
| 10 |
+
with a lot of caveats. CUDA graph trees remove these restrictions:
|
| 11 |
+
|
| 12 |
+
* Previously, if you recorded graphs A, B, you had to replay A, B in that
|
| 13 |
+
order. With CUDA graph trees, after replaying A, you can change your
|
| 14 |
+
mind and record/replay a different graph B'; we will support efficient
|
| 15 |
+
execution of both A, B and A, B', using only max(mem(A, B), mem(A, B')). In
|
| 16 |
+
other words: we support arbitrary trees of CUDA graph operations, not just
|
| 17 |
+
sequences (this is why this feature is called CUDA graph trees.)
|
| 18 |
+
|
| 19 |
+
* Previously, if you executed graph A, some non-CUDA graph code, and then
|
| 20 |
+
graph B, after executing graph B, it was not safe to retain any references
|
| 21 |
+
to intermediates produced by A. With CUDA graph trees, we track if any
|
| 22 |
+
outputs of graph A are still live by the time graph B is run, and make
|
| 23 |
+
sure graph B doesn't clobber their memory when reusing the CUDA graphs
|
| 24 |
+
pool. You'll get a separate recording of B depending on what tensors
|
| 25 |
+
stay live or dead.
|
| 26 |
+
|
| 27 |
+
CUDA graph trees are flexible enough to be used in Dynamo across graph breaks,
|
| 28 |
+
which is their primary use case.
|
| 29 |
+
|
| 30 |
+
The ability to switch from replay to record is fairly nontrivial: remember that
|
| 31 |
+
when you replay a CUDA graph, you only replay CUDA operations; no CPU side state
|
| 32 |
+
is updated. In particular, the CPU-side book-keeping for the allocator is not
|
| 33 |
+
reconstructed. However, to record a new child CUDA graph, we must restore this
|
| 34 |
+
book-keeping. This is what checkpoint pool state is used for.
|
| 35 |
+
"""
|
| 36 |
+
|
| 37 |
+
from __future__ import annotations
|
| 38 |
+
|
| 39 |
+
import contextlib
|
| 40 |
+
import dataclasses
|
| 41 |
+
import functools
|
| 42 |
+
import gc
|
| 43 |
+
import itertools
|
| 44 |
+
import operator
|
| 45 |
+
import sys
|
| 46 |
+
import threading
|
| 47 |
+
import traceback
|
| 48 |
+
import warnings
|
| 49 |
+
import weakref
|
| 50 |
+
from collections import defaultdict
|
| 51 |
+
|
| 52 |
+
from enum import auto, Enum
|
| 53 |
+
from typing import (
|
| 54 |
+
Any,
|
| 55 |
+
Callable,
|
| 56 |
+
cast,
|
| 57 |
+
Dict,
|
| 58 |
+
Iterator,
|
| 59 |
+
List,
|
| 60 |
+
Optional,
|
| 61 |
+
Sequence,
|
| 62 |
+
Set,
|
| 63 |
+
Tuple,
|
| 64 |
+
Union,
|
| 65 |
+
)
|
| 66 |
+
|
| 67 |
+
import torch.fx
|
| 68 |
+
from torch import Tensor
|
| 69 |
+
from torch._dynamo.mutation_guard import GenerationTracker
|
| 70 |
+
from torch._dynamo.utils import preserve_rng_state
|
| 71 |
+
from torch._inductor.compile_fx import (
|
| 72 |
+
align_inputs_from_check_idxs,
|
| 73 |
+
copy_misaligned_inputs,
|
| 74 |
+
get_expanded_dims,
|
| 75 |
+
get_input_idxs_to_check,
|
| 76 |
+
index_expanded_dims,
|
| 77 |
+
remove_unaligned_input_idxs,
|
| 78 |
+
static_input,
|
| 79 |
+
)
|
| 80 |
+
from torch.multiprocessing.reductions import StorageWeakRef
|
| 81 |
+
from torch.storage import UntypedStorage
|
| 82 |
+
from torch.types import _bool
|
| 83 |
+
from torch.utils import _pytree as pytree
|
| 84 |
+
from torch.utils.weak import TensorWeakRef
|
| 85 |
+
|
| 86 |
+
StorageWeakRefPointer = int
|
| 87 |
+
StorageDataPtr = int
|
| 88 |
+
NBytes = int
|
| 89 |
+
|
| 90 |
+
if torch.backends.cuda.is_built():
    from torch._C import (
        _cuda_CUDAAllocator_AllocatorState as AllocatorState,
        _set_cached_tensors_enabled as _set_cached_tensors_enabled,
    )
else:
    # CPU-only builds: provide inert stand-ins so this module still imports.

    class AllocatorState:  # type: ignore[no-redef]
        pass

    def _set_cached_tensors_enabled(enabled: _bool) -> None:
        # no-op when CUDA is not built
        pass
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
# Logger routed through the "cudagraphs" logging artifact.
log = torch._logging.getArtifactLogger(__name__, "cudagraphs")
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
from . import config
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
@dataclasses.dataclass(frozen=True)
class GraphID:
    """Immutable identifier for a single CUDA graph recording."""

    # Counter value; equality and hash come from the frozen dataclass.
    id: int
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
@dataclasses.dataclass(frozen=True)
class FunctionID:
    """Immutable identifier for a function wrapped in cudagraphify_impl."""

    # Counter value; equality and hash come from the frozen dataclass.
    id: int
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
@dataclasses.dataclass(frozen=True)
class WrappedFunction:
    """
    Represents a function that you want to record for CUDA graph replay,
    with a little more metadata so we can identify if we have an applicable
    CUDA graph in our CUDA graph tree for it.
    """

    # The callable to record and later replay.
    model: Callable[..., Any]
    # Indices of inputs treated as static (presumably stable data pointers across
    # calls) — confirm against CUDAGraphNode.static_input_idxs usage.
    static_input_idxs: Sequence[int]
    # Identity of this function within the tree manager.
    id: FunctionID
    # Tensors captured by the function; treated alongside inputs during warmup
    # liveness checks (see CUDAWarmupNode.run).
    constants: Tuple[torch.Tensor, ...]
|
| 134 |
+
|
| 135 |
+
|
| 136 |
+
def clear_cublass_cache():
    """
    Release cuBLAS's persistent workspace allocations.

    Cublas keeps a persistent workspace allocation for running matmuls. This poses a problem
    for doing warmup within a CUDAGraph private pool because we do not want persistent
    allocations from one run to the next. When a new run of a cudagraphs path (generation)
    begins, all tensors from the previous generation are freed — in the memory pool, but not
    elsewhere. A tensor held by the cublas workspace would remain in use there while also
    being handed out again in the next run: the same memory in use in two places.

    To solve this, we clear cublas caches before and after warming up or recording. If a
    workspace is required it will be allocated to the cudagraph private pool and accounted
    for in the allocator for the duration of the program. There is no overhead to this on
    replay since cudagraphs removes allocation overhead.
    """
    torch._C._cuda_clearCublasWorkspaces()
|
| 150 |
+
|
| 151 |
+
|
| 152 |
+
@contextlib.contextmanager
def clear_cublas_manager():
    """Clear cublas workspace caches on both entry and exit of the managed block."""
    clear_cublass_cache()
    try:
        yield
    finally:
        clear_cublass_cache()
|
| 160 |
+
|
| 161 |
+
|
| 162 |
+
@contextlib.contextmanager
def disable_conv_cache_emptying():
    """Temporarily prevent cudnn conv benchmarking from emptying the allocator cache."""
    previous = torch._C._cuda_get_conv_benchmark_empty_cache()
    torch._C._cudnn_set_conv_benchmark_empty_cache(False)
    try:
        yield
    finally:
        # Restore whatever setting was in effect before we entered.
        torch._C._cudnn_set_conv_benchmark_empty_cache(previous)
|
| 170 |
+
|
| 171 |
+
|
| 172 |
+
@contextlib.contextmanager
def enable_history_recording():
    """Turn on CUDA Caching Allocator history recording for the duration of the block.

    If recording is already enabled, leave it untouched on both entry and exit.
    """
    already_enabled = torch._C._cuda_isHistoryEnabled()
    try:
        if not already_enabled:
            torch.cuda.memory._record_memory_history()
        yield
    finally:
        if not already_enabled:
            # Passing None turns recording back off.
            torch.cuda.memory._record_memory_history(None)
|
| 183 |
+
|
| 184 |
+
|
| 185 |
+
def get_history_recording():
    """Return a context manager for allocator history recording, or a no-op one
    when disabled by config."""
    # TODO - remove, prevents cleanup
    if config.triton.cudagraph_trees_history_recording:
        return enable_history_recording()
    return contextlib.nullcontext()
|
| 190 |
+
|
| 191 |
+
|
| 192 |
+
class TreeManagerContainer:
    """
    Manages the lifetime of the tree manager. Like `PrivatePool` in cuda caching allocator,
    the tree and its corresponding memory pool should be kept alive as long as any outstanding
    graph or tensor which is an output of a graph remains alive.

    There is a single tree manager container per device.

    The lifecycle of a tree_manager is:
    -  Is constructed, no graph, no fns, no tensors
    -  Tree manager is fetched, resulting in tree manager being allocated
    -  We generate a bunch of functions, calling add_strong_reference
    -  These functions die, calling finalize_reference
    -  When all the functions die, we finalize_tree_manager.

    TODO: in the future, we would like to do the following once storage weak refs land
    -  We look for all the live storages and add references to THOSE
    -  We count as storages die
    -  All the storages are dead, we deallocate the tree manager
    """

    def __init__(self, device_index):
        """Create an empty container for `device_index`; the manager itself is lazy."""
        # This class keeps a strong reference to tree_manager,
        # but upon all other strong references to the tree_manager will reset it to None.
        # We need a strong reference so that we can still access its attributes upon cleanup.
        self.tree_manager: Optional[CUDAGraphTreeManager] = None

        # Number of outstanding references to the current tree manager
        self.live_cudagraphify_fns = 0

        self.device_index = device_index

        # Following two objects are only set in the case that Tensor outputs outlive
        # the cudagraphify_fns. Reference to the Graph is needed to keep the private pool from
        # deallocation.
        self.live_storages_count = 0
        self.graph: Optional[torch.cuda.CUDAGraph] = None

        # Guards all mutation of the counters and references above; finalizers
        # may fire from arbitrary threads.
        self.lock = threading.Lock()

    def _finalize_tensor(self):
        """Weakref finalizer for an output storage: when the last tracked storage
        dies, drop the graph (and the manager too, if no fns are alive)."""
        with self.lock:
            self.live_storages_count -= 1
            if self.live_storages_count == 0:
                self.graph = None

                # manager was used again after existing cleanup,
                # we shouldnt set it to None
                if self.live_cudagraphify_fns == 0:
                    self.tree_manager = None

    def finalize_cudagraphify_fn(self):
        """Weakref finalizer for a cudagraphify fn: tear down the tree manager
        once the last live fn dies."""
        with self.lock:
            self.live_cudagraphify_fns -= 1
            if self.live_cudagraphify_fns == 0:
                self._finalize_tree_manager()

    def _finalize_tree_manager(self):
        """Drop the tree manager. Caller must already hold self.lock."""
        assert self.lock.locked()
        self.tree_manager = None

        # TODO - when issue #91395 is landed, we can set a weakref on
        # storages and trigger a deallocation when all outputs of the
        # cudagraph are dead.

        # live_storages = list(
        #     tree_manager.live_cudagraph_pool_storages_in_curr_execution()
        # )

        # # Maintain reference to graph to keep tensors alive
        # assert len(tree_manager.roots) > 0, "expected at least one use"
        # root = next(tree_manager.get_roots())
        # self.graph = root.graph
        # seen_storages = set()
        # for stor in live_storages:
        #     if stor in seen_storages:
        #         continue
        #     seen_storages.add(stor)
        #     self.live_storages_count += 1
        #     . weakref.finalize(stor, self._finalize_tensor)

    def add_strong_reference(self, fn: Callable[..., Any]):
        """Count `fn` as keeping the manager alive; a weakref finalizer
        decrements the count automatically when fn is garbage collected."""
        with self.lock:
            self.live_cudagraphify_fns += 1

        weakref.finalize(fn, self.finalize_cudagraphify_fn)

    def get_tree_manager(self) -> CUDAGraphTreeManager:
        """Lazily construct and return this device's CUDAGraphTreeManager."""
        with self.lock:
            if self.tree_manager is None:
                self.tree_manager = CUDAGraphTreeManager(self.device_index)
            return self.tree_manager
|
| 284 |
+
|
| 285 |
+
|
| 286 |
+
# Thread-local registry of cudagraph tree state (see get_obj for the TLS
# fallback used by autograd worker threads).
local = threading.local()

# one tree manager per device
local.tree_manager_containers = {}
local.tree_manager_locks = defaultdict(threading.Lock)
|
| 291 |
+
|
| 292 |
+
|
| 293 |
+
# only incremented by user call of mark_step_begin
class MarkStepBox:
    """Module-level holder for the user-driven step counter."""

    # Decremented (counts downward) by mark_step_begin; reset to 0 by
    # reset_cudagraph_trees.
    mark_step_counter = 0
|
| 296 |
+
|
| 297 |
+
|
| 298 |
+
# We need to register this as an object that will be copied over as TLS when new
# threads are created in autograd. get_obj() reads these back out of TLS on
# threads where the `local` attributes are absent.
torch._C._stash_obj_in_tls("tree_manager_containers", local.tree_manager_containers)
torch._C._stash_obj_in_tls("tree_manager_locks", local.tree_manager_locks)
|
| 302 |
+
|
| 303 |
+
|
| 304 |
+
def mark_step_begin():
    """Signal that a new iteration of inference or training is about to begin."""
    # Counts downward so these user-driven values are distinguishable from the
    # (upward-counting) GenerationTracking counter.
    MarkStepBox.mark_step_counter -= 1
|
| 309 |
+
|
| 310 |
+
|
| 311 |
+
def reset_cudagraph_trees():
    "Clear all cudagraph trees"
    # see shutdown below for why this is necessary
    container_dict = get_obj(local, "tree_manager_containers")
    locks_dict = get_obj(local, "tree_manager_locks")
    for device, lock in locks_dict.items():
        with lock:
            container = container_dict.get(device)
            # Nothing was ever allocated for this device: nothing to shut down.
            if not container or not container.tree_manager:
                continue

            container.tree_manager.shutdown()

    # Disable cached-tensor tracking and drop every container so subsequent use
    # starts from a clean slate.
    _set_cached_tensors_enabled(False)
    container_dict.clear()

    # Reset the user-driven step counter too (see MarkStepBox).
    MarkStepBox.mark_step_counter = 0
|
| 328 |
+
|
| 329 |
+
|
| 330 |
+
def get_obj(local, attr_name):
    """Look up `attr_name` on this thread's `local`, falling back to the copy
    stashed in TLS for threads (e.g. autograd workers) that lack the attribute."""
    missing = object()
    value = getattr(local, attr_name, missing)
    if value is not missing:
        return value
    # Not set on this thread: it must have been stashed in TLS at module import.
    assert torch._C._is_key_in_tls(attr_name)
    return torch._C._get_obj_in_tls(attr_name)
|
| 336 |
+
|
| 337 |
+
|
| 338 |
+
def get_container(device_index: int):
    """Return the TreeManagerContainer for `device_index`, creating it on first use."""
    containers = get_obj(local, "tree_manager_containers")
    lock = get_obj(local, "tree_manager_locks")[device_index]

    with lock:
        container = containers.get(device_index)
        if container is None:
            container = TreeManagerContainer(device_index)
            containers[device_index] = container
        return container
|
| 347 |
+
|
| 348 |
+
|
| 349 |
+
def get_manager(
    device_index: int, create_if_none_exists=True
) -> Optional[CUDAGraphTreeManager]:
    """Fetch the tree manager for `device_index`; optionally construct it lazily,
    otherwise return whatever (possibly None) the container currently holds."""
    container = get_container(device_index)
    if create_if_none_exists:
        return container.get_tree_manager()
    return container.tree_manager
|
| 355 |
+
|
| 356 |
+
|
| 357 |
+
def cudagraphify_impl(model, inputs, static_input_idxs, *args, **kwargs):
    """Wrap `model` so each distinct combination of int (symint) input values
    gets its own lazily-recorded cudagraphified function.

    Returns a dispatcher callable; on first sight of a given int key it records
    via cudagraphify() and caches the resulting fn.
    """
    # Cache of recorded functions, keyed by the values of the int inputs.
    fn_cache: Dict[Tuple[int, ...], Callable[..., Any]] = {}

    # Detect int inputs: we need to index on these
    int_key = [i for i, v in enumerate(inputs) if isinstance(v, int)]
    # NOTE: itemgetter with a single index yields a scalar rather than a tuple;
    # both are valid dict keys and the arity is fixed per wrapped function.
    get_ints: Any = operator.itemgetter(*int_key) if int_key else lambda _: None

    # Drop our reference so the initial input tensors do not outlive this call.
    del inputs

    def deferred_cudagraphify(inputs):
        int_key = get_ints(inputs)
        fn = fn_cache.get(int_key)
        if fn is not None:
            # Fast path: already recorded for this symint key.
            return fn(inputs)

        if int_key is None:
            log.info("recording cudagraph tree for graph without symints")
        else:
            log.info("recording cudagraph tree for symint key %s", int_key)

        # first get indices we need to check to align, then update our static inputs,
        # and finally copy
        check_input_idxs = get_input_idxs_to_check(inputs, static_input_idxs)
        new_static_input_idxs = remove_unaligned_input_idxs(inputs, static_input_idxs)
        copy_misaligned_inputs(inputs, check_input_idxs)

        fn, out = cudagraphify(model, inputs, new_static_input_idxs, *args, **kwargs)
        # Future calls re-check alignment for exactly the indices found above.
        fn = align_inputs_from_check_idxs(fn, inputs_to_check=check_input_idxs)
        fn_cache[int_key] = fn

        return out

    return deferred_cudagraphify
|
| 390 |
+
|
| 391 |
+
|
| 392 |
+
def cudagraphify(
    model,
    inputs,
    static_input_idxs=(),
    *,
    device_index: int,
    is_backward: bool,
    is_inference: bool,
    stack_traces: Optional[StackTraces] = None,
    constants: Tuple[torch.Tensor, ...] = (),
):
    """Register `model` with the per-device tree manager under the appropriate
    compilation mode and return the manager's add_function result."""
    manager = get_container(device_index).get_tree_manager()
    # A graph cannot be both a backward and an inference graph.
    assert not (is_backward and is_inference)

    if is_backward:
        mode = CompilationMode.BACKWARD
    elif is_inference:
        mode = CompilationMode.INFERENCE
    else:
        mode = CompilationMode.FORWARD

    return manager.add_function(
        model,
        inputs,
        static_input_idxs,
        stack_traces,
        mode,
        constants,
    )
|
| 419 |
+
|
| 420 |
+
|
| 421 |
+
class StorageWeakRefWrapper:
    """
    Wrapper around a storage weak ref. Will deallocate it upon expiration if invoked.
    """

    __slots__ = ["ref", "_data_ptr", "extra_ref_check"]

    # NOTE(review): this annotation names `storage_ref`, but the slot actually
    # holding the StorageWeakRef is `ref`; this attribute is never assigned.
    storage_ref: Optional[StorageWeakRef]

    def __init__(
        self,
        inp: Union[Tensor, UntypedStorage],
        extra_ref_check: Optional[Callable[[], bool]] = None,
    ):
        """
        extra_ref_check is an additional check we need to run to check if the
        weak ref has expired. in checking storage use count we assume extra_ref_check
        will hold an additional reference to the storage.
        """
        if isinstance(inp, Tensor):
            stor = inp.untyped_storage()
        else:
            assert isinstance(inp, UntypedStorage)
            stor = inp
        self.ref = StorageWeakRef(stor)
        # Captured eagerly so it remains queryable after the storage dies
        # (see data_ptr()).
        self._data_ptr = stor.data_ptr()
        self.extra_ref_check = extra_ref_check

    @classmethod
    def from_weakref_and_data_ptr(cls, cdata, data_ptr, extra_ref_check=None):
        # Alternate constructor: build directly from a raw weakref cdata and a
        # known data pointer, bypassing __init__ (no Tensor/Storage required).
        instance = cls.__new__(cls)
        instance._data_ptr = data_ptr
        instance.ref = StorageWeakRef.from_weakref(cdata)
        instance.extra_ref_check = extra_ref_check
        return instance

    def __call__(self) -> Optional[StorageWeakRefPointer]:
        # Return the weak ref's cdata pointer while the storage is alive,
        # else None.
        if self.expired():
            return None

        return self.ref.cdata

    def swap_weakref(self, cdata):
        # Explicitly release the current weak ref, then rebind to a new cdata
        # in place.
        self.ref.__del__()
        self.ref.cdata = cdata

    def data_ptr(self) -> int:
        "NB: returns the data ptr even if the storage has expired"
        return self._data_ptr

    def remove_extra_reference(self):
        self.extra_ref_check = None

    def expired(self) -> bool:
        # If the extra check fails we conservatively report the storage as
        # still live — presumably the extra-reference bookkeeping is in flux;
        # confirm against the callers that supply extra_ref_check.
        if self.extra_ref_check is not None and not self.extra_ref_check():
            return False

        # if extra_ref_check is not None we expect an additional reference
        stor_count = torch._C._storage_Use_Count(self.ref.cdata)
        return (stor_count - (self.extra_ref_check is not None)) == 0

    def __repr__(self):
        if self.ref is None or self.ref.expired():
            return f"StorageWeakRefWrapper to {self.data_ptr()}; dead"
        else:
            return f"StorageWeakRefWrapper to {self.data_ptr()}; alive"
|
| 487 |
+
|
| 488 |
+
|
| 489 |
+
def is_live(weak_ref: Optional[StorageWeakRefWrapper]) -> bool:
    """True when `weak_ref` is present and still dereferences to a live storage."""
    return maybe_deref(weak_ref) is not None
|
| 491 |
+
|
| 492 |
+
|
| 493 |
+
def maybe_deref(
    weak_ref: Optional[StorageWeakRefWrapper],
) -> Optional[Tuple[StorageWeakRefPointer, int]]:
    """Dereference `weak_ref`, returning (cdata pointer, recorded data ptr),
    or None when the ref is absent or has expired."""
    if weak_ref is None:
        return None
    cdata = weak_ref()
    if cdata is None:
        return None
    # NB: the storage's current data ptr does not necessarily equal the
    # weak_ref.data_ptr() captured at construction.
    return cdata, weak_ref.data_ptr()
|
| 503 |
+
|
| 504 |
+
|
| 505 |
+
@contextlib.contextmanager
def _use_cuda_memory_pool_manager(device, mem_pool, stream):
    """
    Context manager to use cuda graph pool for new allocations. If you use this manager
    all cudagraph tensors in use should be reflected in the allocator or they will be overwritten.
    existing_graph should already have been used in a capture, and the mem_pool must already exist,
    because this manager will not preserve a reference to the pool which keeps it alive.
    """
    # Quiesce outstanding work and order `stream` after the current stream
    # before redirecting its allocations into the pool.
    torch.cuda.synchronize()
    stream.wait_stream(torch.cuda.current_stream())

    with torch.cuda.stream(stream), torch.device(device):
        torch._C._cuda_beginAllocateCurrentStreamToPool(device, mem_pool)
        try:
            yield
        finally:
            # Always undo the redirection, even if the body raised.
            torch._C._cuda_endAllocateCurrentStreamToPool(device, mem_pool)
            torch._C._cuda_releasePool(device, mem_pool)

    # Make subsequent work on the current stream wait for what ran on `stream`.
    torch.cuda.current_stream().wait_stream(stream)
|
| 525 |
+
|
| 526 |
+
|
| 527 |
+
def map_to_ref(t: Optional[Tensor]) -> Optional[StorageWeakRefWrapper]:
    """Wrap a tensor's storage in a weak ref; non-tensors must be None and map to None."""
    if isinstance(t, torch.Tensor):
        return StorageWeakRefWrapper(t)
    assert t is None
    return None
|
| 532 |
+
|
| 533 |
+
|
| 534 |
+
# A path index of (depth, offset): `depth` is the number of nodes from the root
# of the tree, `offset` indexes into that node's graph outputs.
PathOutputIndex = Tuple[int, int]

# For each node in the path, for each output, is the output alive
PathLiveness = List[List[bool]]

# Optional stack-trace strings — presumably one entry per recorded output, with
# None when no trace is available; confirm at the call sites that build them.
StackTraces = List[Optional[str]]
|
| 542 |
+
|
| 543 |
+
|
| 544 |
+
class CUDAWarmupNode:
    """
    Simplified Wrapper around A CUDA Model that wraps outputs in storage refs and exposes
    apis to get the live storages in the current chain of warmup.

    A CUDAWarmupNode may have either CUDAGraphNode or CUDAWarmupNode as a parent, but may only have
    CUDAWarmupNode as children, because we cannot record or execute with tensors which do not have stable
    memory addresses.

    CUDAWarmupNode and CUDAGraphNode have a number of differences that make it easier to use separate classes.
    - Much of the CUDAGraphNode logic & initialization is based on the tensor properties of first recording. In the
    first instance of warmup, these are not finalized yet.
    - All Inputs to the RecordedFunction must be copied over to the cuda graph memory pool, this is unnecessary in warmup.
    - CUDAWarmup is only used once and so does not need to optimize as much bookkeeping. It is much simpler.

    NB: this class and CUDAGraphNode need to expose `path_live_weakrefs`, `all_outputs_are_dead`, and
    `self.outputs_weakrefs`, `stack_traces`, and `tensor_weakrefs` for compatibility.
    """

    def __init__(
        self,
        wrapped_function: WrappedFunction,
        parent,
        cuda_graphs_pool: Tuple[int, int],
        existing_cuda_graph: Optional[torch.cuda.CUDAGraph],
        device_index: int,
        stack_traces: Optional[StackTraces],
        stream: torch.cuda.Stream,
        already_warm: bool,
    ):
        self.wrapped_function = wrapped_function
        # Parent node (CUDAGraphNode or CUDAWarmupNode), or None at a root.
        self.parent = parent
        self.cuda_graphs_pool = cuda_graphs_pool
        # Populated by run(): one entry per model output (None for outputs we
        # do not track).
        self.outputs_weakrefs: List[Optional[StorageWeakRefWrapper]] = []
        self.tensor_weakrefs: List[Optional[TensorWeakRef]] = []
        self.existing_cuda_graph = existing_cuda_graph
        self.has_run = False
        self.device_index = device_index
        self.stack_traces = stack_traces
        self.stream = stream
        # True when this function was already warmed up once; skips the slow
        # memory-pool assertions below.
        self.already_warm = already_warm

    def run(self, new_inputs):
        """Execute the wrapped model once inside the cudagraph memory pool,
        recording weak refs to its freshly-allocated CUDA outputs."""
        assert not self.has_run, "Wrapped function should never be run twice"

        # See: output_is_alias_of_persistent_static_inputs below. We should only be returning freshly created
        # storages in path_live_weakrefs.
        existing_path_data_ptrs = {
            t.data_ptr() for t in self.path_live_weakrefs() if t()
        }

        def get_non_cudagraph_inps():
            # Data ptrs of tensor inputs/constants that did NOT come from this
            # path's cudagraph pool.
            non_cudagraph_inps = set()
            for t in itertools.chain(new_inputs, self.wrapped_function.constants):
                if (
                    isinstance(t, torch.Tensor)
                    and t.untyped_storage().data_ptr() not in existing_path_data_ptrs
                ):
                    non_cudagraph_inps.add(t.untyped_storage().data_ptr())
            return non_cudagraph_inps

        non_cudagraph_inps = get_non_cudagraph_inps()

        if config.triton.slow_path_cudagraph_asserts and not self.already_warm:
            refs = list(self.path_live_weakrefs())
            check_memory_pool(self.device_index, self.cuda_graphs_pool, refs)

        # Run on our stream with allocations redirected into the shared pool,
        # with cublas caches cleared around the run and (optionally) allocator
        # history recording enabled.
        with torch.cuda.device(
            self.device_index
        ), disable_conv_cache_emptying(), clear_cublas_manager(), _use_cuda_memory_pool_manager(
            self.device_index, self.cuda_graphs_pool, self.stream
        ), get_history_recording():
            out = self.wrapped_function.model(new_inputs)

        # The wrapped model is expected to consume (empty) its input list.
        assert len(new_inputs) == 0

        # sdpa returns cpu tensors when not recording cuda graph
        def add_ref(o):
            # Track only live CUDA tensors with fresh, nonzero storage.
            return (
                o is not None
                and isinstance(o, torch.Tensor)
                and o.is_cuda
                and o.untyped_storage().data_ptr() not in non_cudagraph_inps
                and o.untyped_storage().data_ptr() != 0
            )

        self.outputs_weakrefs.extend(
            [map_to_ref(o) if add_ref(o) else None for o in out]
        )
        self.tensor_weakrefs.extend(
            [TensorWeakRef(o) if add_ref(o) else None for o in out]
        )

        if config.triton.slow_path_cudagraph_asserts and not self.already_warm:
            out_refs = self.path_live_weakrefs()
            new_storages = [
                t for t in out_refs if t.data_ptr() not in non_cudagraph_inps
            ]
            check_memory_pool(self.device_index, self.cuda_graphs_pool, new_storages)

        return out

    @property
    def _path_from_root(self):
        """Yield the nodes from the tree root down to (and including) self."""
        nodes = []
        node = self
        while node:
            nodes.append(node)
            node = node.parent

        yield from reversed(nodes)

    def path_live_weakrefs(self) -> Iterator[StorageWeakRefWrapper]:
        "Returns all live storage weakrefs created by nodes in this path"
        for node in self._path_from_root:
            for output in node.outputs_weakrefs:
                if is_live(output):
                    yield output

    def all_outputs_are_dead(self):
        # True when no output anywhere along the path is still alive.
        return not list(self.path_live_weakrefs())
|
| 665 |
+
|
| 666 |
+
|
| 667 |
+
# Aliases for List that say what the indices denote. Documentation-only:
# at runtime each is just typing.List.
InputList = List  # input indexes
OutputList = List  # output indexes
LevelList = List  # levels (distance from root of tree)
|
| 671 |
+
|
| 672 |
+
|
| 673 |
+
class OutputAliasInfo:
    """Base marker type describing how a graph output aliases storage."""
|
| 675 |
+
|
| 676 |
+
|
| 677 |
+
class _UnaliasedStorage(OutputAliasInfo):
    """Marker: the graph output constructs a new (unaliased) storage, or is None."""


# Shared singleton instance of the marker.
UnaliasedStorage = _UnaliasedStorage()
|
| 683 |
+
|
| 684 |
+
|
| 685 |
+
class AliasesPriorGraphOutput(OutputAliasInfo):
    "Marks that the graph output aliases an output of a prior graph"
    __slots__ = ["index"]

    # (depth, output-offset) into the path of prior nodes — see PathOutputIndex.
    index: PathOutputIndex

    def __init__(self, index: PathOutputIndex):
        assert isinstance(index, tuple)
        self.index = index
|
| 694 |
+
|
| 695 |
+
|
| 696 |
+
class AliasesNewOutput(OutputAliasInfo):
    "Marks that the graph output aliases an index in the new, returned outputs"

    __slots__ = ["index"]

    # Position within the freshly returned output list that this output aliases.
    index: int

    def __init__(self, index):
        assert isinstance(index, int)
        self.index = index
|
| 706 |
+
|
| 707 |
+
|
| 708 |
+
class CUDAGraphNode:
|
| 709 |
+
"""
|
| 710 |
+
A single recording of a function into a CUDA Graph. Recordings of CUDA Graphs share a single memory pool
|
| 711 |
+
and are structured into a tree, where there is a single recording that can precede it (parent) and multiple
|
| 712 |
+
subsequent recordings that may follow (children). A node will have no parent if it is the first recording
|
| 713 |
+
in a tree; i.e., when it is first recorded, there are no live tensors from a previous recording which
|
| 714 |
+
would force a dependency.
|
| 715 |
+
|
| 716 |
+
On first recording, all of the live tensors in the current CUDA Graph Node path will be
|
| 717 |
+
reflected in the corresponding private pool. On subsequent executions, the caching allocator
|
| 718 |
+
is unaffected when the graph is replayed.
|
| 719 |
+
|
| 720 |
+
In order to support recording a subsequent cuda graph recording after execution of this graph,
|
| 721 |
+
we checkpoint the state of the memory pool so that it may later be resumed.
|
| 722 |
+
|
| 723 |
+
WrappedFunction should have already been warmed up prior to invocation.
|
| 724 |
+
|
| 725 |
+
See [setCheckpointPoolState] for further explanation, as well as
|
| 726 |
+
https://user-images.githubusercontent.com/13564/222815509-374f3400-f83d-4f7d-8fa6-4a092b3250bb.png
|
| 727 |
+
"""
|
| 728 |
+
|
| 729 |
+
def __init__(
|
| 730 |
+
self,
|
| 731 |
+
wrapped_function: WrappedFunction,
|
| 732 |
+
id: GraphID,
|
| 733 |
+
parent: Optional[CUDAGraphNode],
|
| 734 |
+
inputs: List[Tensor],
|
| 735 |
+
cuda_graphs_pool: Tuple[int, int],
|
| 736 |
+
device_index: int,
|
| 737 |
+
stack_traces: Optional[StackTraces],
|
| 738 |
+
stream: torch.cuda.Stream,
|
| 739 |
+
):
|
| 740 |
+
assert isinstance(inputs, (list, tuple))
|
| 741 |
+
|
| 742 |
+
self.wrapped_function = wrapped_function
|
| 743 |
+
self.id = id
|
| 744 |
+
self.device = device_index
|
| 745 |
+
self.stack_traces = stack_traces
|
| 746 |
+
self.stream = stream
|
| 747 |
+
|
| 748 |
+
# if this is a root parent will be None. use weakref to prevent reference cycle
|
| 749 |
+
self._parent = weakref.ref(parent) if parent is not None else None
|
| 750 |
+
# reference to the shared memory pool for the entire cuda graphs tree
|
| 751 |
+
self.cuda_graphs_pool = cuda_graphs_pool
|
| 752 |
+
|
| 753 |
+
# A single wrapped function may be recorded multiple times if memory patterns or
|
| 754 |
+
# invariants change from one execution to the next
|
| 755 |
+
self.children: Dict[FunctionID, List[CUDAGraphNode]] = defaultdict(list)
|
| 756 |
+
|
| 757 |
+
# StorageWeakRef maintains whether the Storage C++ object remains allocated,
|
| 758 |
+
# not whether the corresponding memory has been deallocated. In order
|
| 759 |
+
# to use them to track memory deallocations we must maintain a single StorageWeakRef
|
| 760 |
+
# for all Storages that reference that memory (even if we are constructing Storages
|
| 761 |
+
# that do not have a deallocator function). We maintain one single storage_cache
|
| 762 |
+
# as we execute any tree path. When we retrieve a storage from the cache we
|
| 763 |
+
# check that it is still alive, and we hash based on observed recording data ptr
|
| 764 |
+
# and storage cdata.
|
| 765 |
+
|
| 766 |
+
# we preserve a single reference to executed outputs that is then referenced
|
| 767 |
+
# in children to avoid children having to chase parent pointers in the hot path
|
| 768 |
+
# DO NOT reassign output_weakrefs, only call `clear()`
|
| 769 |
+
# Path is a series of nodes from root to the current node
|
| 770 |
+
self.outputs_weakrefs: OutputList[Optional[StorageWeakRefWrapper]] = []
|
| 771 |
+
self.path_weakrefs: LevelList[OutputList[Optional[StorageWeakRefWrapper]]] = [
|
| 772 |
+
node.outputs_weakrefs for node in self._path_from_root
|
| 773 |
+
]
|
| 774 |
+
self.path_stacktraces: LevelList[StackTraces] = [
|
| 775 |
+
node.stack_traces for node in self._path_from_root
|
| 776 |
+
]
|
| 777 |
+
self.tensor_weakrefs: OutputList[Optional[TensorWeakRef]] = []
|
| 778 |
+
|
| 779 |
+
# tensors which are outputs of previous graphs in the tree
|
| 780 |
+
self.cudagraph_managed_idxs: List[int] = [
|
| 781 |
+
idx
|
| 782 |
+
for idx, t in enumerate(inputs)
|
| 783 |
+
if isinstance(t, torch.Tensor) and self._is_cuda_graph_recorded_tensor(t)
|
| 784 |
+
]
|
| 785 |
+
|
| 786 |
+
self.static_input_idxs: List[int] = list(
|
| 787 |
+
set(wrapped_function.static_input_idxs) | set(self.cudagraph_managed_idxs)
|
| 788 |
+
)
|
| 789 |
+
|
| 790 |
+
self.static_input_data_ptrs: InputList[Optional[int]] = [
|
| 791 |
+
(
|
| 792 |
+
inputs[i].data_ptr()
|
| 793 |
+
if isinstance(inputs[i], torch.Tensor) and i in self.static_input_idxs
|
| 794 |
+
else None
|
| 795 |
+
)
|
| 796 |
+
for i in range(len(inputs))
|
| 797 |
+
]
|
| 798 |
+
|
| 799 |
+
# When we checkpoint, and free generations, we will be manually freeing the outputs
|
| 800 |
+
# of CUDAGraphNodes. We should not be freeing parameters, not do we need to account for
|
| 801 |
+
# their liveness (they are static), so we need to compute which outputs are aliases of
|
| 802 |
+
# parameters. Some static inputs are saved tensors from the forward that die in the backward.
|
| 803 |
+
# Their locations are static but lifetimes are not. We only include the persistent static
|
| 804 |
+
# data ptrs below because the non persistent data ptrs may be outputs of this record and
|
| 805 |
+
# fresh allocations.
|
| 806 |
+
|
| 807 |
+
# precompute expanded dims to avoid computing in the hot path
|
| 808 |
+
self.expanded_dims: List[List[int]] = [
|
| 809 |
+
get_expanded_dims(x)
|
| 810 |
+
if isinstance(x, torch.Tensor) and idx not in self.static_input_idxs
|
| 811 |
+
else []
|
| 812 |
+
for idx, x in enumerate(inputs)
|
| 813 |
+
]
|
| 814 |
+
|
| 815 |
+
# For each node in path, which outputs were observed to be live
|
| 816 |
+
# before invoking graph recording, and after graph recording
|
| 817 |
+
self.recorded_liveness_before_graph: LevelList[OutputList[bool]] = []
|
| 818 |
+
self.recorded_liveness_after_graph: LevelList[OutputList[bool]] = []
|
| 819 |
+
|
| 820 |
+
# List of Tuples of (depth, output_index) that index into node at depth
|
| 821 |
+
# number of nodes from root and output_index of outputs. Will index into
|
| 822 |
+
# path_weakrefs.
|
| 823 |
+
self.expected_dead_indices_before_graph: List[PathOutputIndex] = []
|
| 824 |
+
self.expected_dead_indices_after_graph: List[PathOutputIndex] = []
|
| 825 |
+
|
| 826 |
+
# all live indices after graph recording
|
| 827 |
+
self.live_indices_after_graph: List[PathOutputIndex] = []
|
| 828 |
+
|
| 829 |
+
if self.parent is not None:
|
| 830 |
+
previous_liveness = self.parent.recorded_liveness_after_graph
|
| 831 |
+
curr_liveness = self._get_liveness(self.path_weakrefs)
|
| 832 |
+
|
| 833 |
+
different_indices = self._get_different_indices(
|
| 834 |
+
previous_liveness, curr_liveness
|
| 835 |
+
)
|
| 836 |
+
|
| 837 |
+
self.recorded_liveness_before_graph = curr_liveness
|
| 838 |
+
self.expected_dead_indices_before_graph = different_indices
|
| 839 |
+
|
| 840 |
+
recording_inputs = self._allocate_and_copy_recording_inputs(inputs)
|
| 841 |
+
# recording inputs will copy over memory, so we can free non recording inputs
|
| 842 |
+
inputs.clear()
|
| 843 |
+
del inputs
|
| 844 |
+
|
| 845 |
+
# graph used for recording model invocation
|
| 846 |
+
self.graph: Optional[torch.cuda.CUDAGraph] = torch.cuda.CUDAGraph()
|
| 847 |
+
|
| 848 |
+
# we allocate non-static inputs within the same memory pool as the CUDAGraph
|
| 849 |
+
# which we will record the model with. For memory efficiency, it is important
|
| 850 |
+
# to reclaim the input memory when the inputs are no longer live. To accomplish this,
|
| 851 |
+
# we reconstruct tensors at the correct data pointers of our inputs which are
|
| 852 |
+
# non owning and do not prevent deallocation. On subsequent executions, input values
|
| 853 |
+
# will be copied over to these tensors.
|
| 854 |
+
self.reconstructed_inputs: InputList[Union[Tensor, int]] = [
|
| 855 |
+
self._reconstruct_from_tensor_metadata(self._tensor_metadata(x))
|
| 856 |
+
if isinstance(x, torch.Tensor)
|
| 857 |
+
else x
|
| 858 |
+
for x in recording_inputs
|
| 859 |
+
]
|
| 860 |
+
|
| 861 |
+
# DO THE RECORDING!!!
|
| 862 |
+
# We record the CUDA graph in the constructor of CUDAGraphNode, which
|
| 863 |
+
# gives you what the CPU side compute of the function would do. We
|
| 864 |
+
# don't throw the recording outputs away: their memory is
|
| 865 |
+
# correctly accounted for in the CUDAGraphs caching allocator. This
|
| 866 |
+
# means on the very FIRST run of the CUDA graph node, we can directly
|
| 867 |
+
# do more recording, because we have a valid caching allocator state.
|
| 868 |
+
# NB: This relies on run() being called immediately after the
|
| 869 |
+
# constructor, otherwise this optimization would not be valid.
|
| 870 |
+
|
| 871 |
+
# initialized below in _record
|
| 872 |
+
|
| 873 |
+
self.checkpointed_caching_state: Optional[AllocatorState] = None
|
| 874 |
+
|
| 875 |
+
# Output Storage Alias information, can be:
|
| 876 |
+
# - A new, unaliased storage, or the output is None
|
| 877 |
+
# - An alias of an output of a prior graph
|
| 878 |
+
# - An alias of an output already created in the reconstructed outputs
|
| 879 |
+
# This is None if the output in question is an int
|
| 880 |
+
self.output_storage_alias: OutputList[Optional[OutputAliasInfo]] = []
|
| 881 |
+
|
| 882 |
+
# is the output Storage unaliased in subsequent outputs, of all subsequent paths
|
| 883 |
+
# if it is, we cached the output tensor and adjust storage liveness tracking to also
|
| 884 |
+
# check if the output tensor does not have an additional python reference.
|
| 885 |
+
# If a descendent node discovers it has an alias of a prior output, then the output
|
| 886 |
+
# will no longer be cached in the ancestor.
|
| 887 |
+
# The large majority of tensors are unaliased, and preserving aliased output tensors would add
|
| 888 |
+
# significant additional complexity with marginal gains
|
| 889 |
+
# The cached tensor outputs are added on the first execution, and cleared whenever we need
|
| 890 |
+
# to do subsequent recording
|
| 891 |
+
self.unaliased_in_all_paths: OutputList[bool] = []
|
| 892 |
+
self.cached_tensor_outputs: OutputList[Optional[Tensor]] = []
|
| 893 |
+
|
| 894 |
+
# if an output aliases a static, persistent input then the corresponding Tensor will
|
| 895 |
+
# be set here. These are different than cached tensors, because they are tensors that
|
| 896 |
+
# are aliases of parameters that are always live.
|
| 897 |
+
self.static_output_tensors: OutputList[Optional[Tensor]] = []
|
| 898 |
+
|
| 899 |
+
# Cleared after recording
|
| 900 |
+
self.recording_outputs: Optional[
|
| 901 |
+
OutputList[Union[torch.Tensor, int]]
|
| 902 |
+
] = self._record(wrapped_function.model, recording_inputs)
|
| 903 |
+
self.outputs_metadata: OutputList[Union[Dict[str, Any], int, None]] = []
|
| 904 |
+
|
| 905 |
+
# As with inputs, we do not want to keep the outputs permanently alive because that would prevent
|
| 906 |
+
# their memory being reclaimed in subsequent cuda graph recordings. We record the tensor metadata
|
| 907 |
+
# needed to reconstruct instead.
|
| 908 |
+
assert self.recording_outputs is not None
|
| 909 |
+
for out in self.recording_outputs:
|
| 910 |
+
if isinstance(out, torch.Tensor):
|
| 911 |
+
self.outputs_metadata.append(
|
| 912 |
+
self._tensor_metadata(out, ignore_storage_offset=False)
|
| 913 |
+
)
|
| 914 |
+
else:
|
| 915 |
+
assert isinstance(out, (int, type(None))), type(out)
|
| 916 |
+
self.outputs_metadata.append(out)
|
| 917 |
+
|
| 918 |
+
self.graph.replay()
|
| 919 |
+
|
| 920 |
+
def _copy_input(self, idx, dst, src):
|
| 921 |
+
expanded_dims = self.expanded_dims[idx]
|
| 922 |
+
dst = index_expanded_dims(dst, expanded_dims)
|
| 923 |
+
src = index_expanded_dims(src, expanded_dims)
|
| 924 |
+
# TODO - one jit kernel across multiple inputs
|
| 925 |
+
dst.copy_(src)
|
| 926 |
+
|
| 927 |
+
def run_first_inputs(self, new_inputs):
|
| 928 |
+
if config.triton.fast_path_cudagraph_asserts:
|
| 929 |
+
self.debug_check_invariants_before_invocation()
|
| 930 |
+
|
| 931 |
+
# graph is already invoked in the __init__
|
| 932 |
+
# inputs are copied over in _allocate_recording_inputs and subsequently cleared
|
| 933 |
+
assert len(new_inputs) == 0
|
| 934 |
+
outputs = self.recording_outputs
|
| 935 |
+
self.recording_outputs = None
|
| 936 |
+
return outputs
|
| 937 |
+
|
| 938 |
+
def run(self, new_inputs):
|
| 939 |
+
if config.triton.fast_path_cudagraph_asserts:
|
| 940 |
+
self.debug_check_invariants_before_invocation()
|
| 941 |
+
|
| 942 |
+
assert len(self.static_input_data_ptrs) == len(new_inputs)
|
| 943 |
+
# NB: this ranges over non-static inputs too
|
| 944 |
+
for idx, data_ptr in enumerate(self.static_input_data_ptrs):
|
| 945 |
+
if idx in self.cudagraph_managed_idxs:
|
| 946 |
+
continue
|
| 947 |
+
if not isinstance(new_inputs[idx], torch.Tensor):
|
| 948 |
+
pass
|
| 949 |
+
elif data_ptr is not None:
|
| 950 |
+
# static input, e.g., parameter
|
| 951 |
+
assert data_ptr == new_inputs[idx].data_ptr()
|
| 952 |
+
else:
|
| 953 |
+
# non-static input, need to copy it into CUDA graph
|
| 954 |
+
dst = self.reconstructed_inputs[idx]
|
| 955 |
+
src = new_inputs[idx]
|
| 956 |
+
self._copy_input(idx, dst, src)
|
| 957 |
+
|
| 958 |
+
new_inputs.clear()
|
| 959 |
+
self.run_graph()
|
| 960 |
+
|
| 961 |
+
outputs = self.reconstruct_outputs()
|
| 962 |
+
self.debug_check_invariants_after_invocation()
|
| 963 |
+
|
| 964 |
+
return outputs
|
| 965 |
+
|
| 966 |
+
def reconstruct_outputs(self):
|
| 967 |
+
"Reconstruct output tensors according to their saved metadata and alias information"
|
| 968 |
+
|
| 969 |
+
# Cached tensors will not yet be set on the first execution
|
| 970 |
+
# They are also cleared in checkpointing, so if we checkpoint this node
|
| 971 |
+
# and then execute it again we will need to repopulate cached tensors
|
| 972 |
+
if not self.cached_tensor_outputs:
|
| 973 |
+
self._initialize_cached_tensors()
|
| 974 |
+
|
| 975 |
+
outputs: List[Optional[Union[int, torch.Tensor]]] = []
|
| 976 |
+
|
| 977 |
+
for i, (storage_info, metadata) in enumerate(
|
| 978 |
+
zip(self.output_storage_alias, self.outputs_metadata)
|
| 979 |
+
):
|
| 980 |
+
if not isinstance(metadata, dict): # tensor metadata
|
| 981 |
+
assert isinstance(metadata, (int, type(None)))
|
| 982 |
+
outputs.append(metadata)
|
| 983 |
+
continue
|
| 984 |
+
|
| 985 |
+
cached_t = self.cached_tensor_outputs[i]
|
| 986 |
+
if cached_t is not None:
|
| 987 |
+
# No need to update weakrefs, already correctly initialized
|
| 988 |
+
outputs.append(cached_t)
|
| 989 |
+
continue
|
| 990 |
+
|
| 991 |
+
static_t = self.static_output_tensors[i]
|
| 992 |
+
if static_t is not None:
|
| 993 |
+
assert self.outputs_weakrefs[i] is None
|
| 994 |
+
outputs.append(static_t)
|
| 995 |
+
continue
|
| 996 |
+
|
| 997 |
+
storage = self.prepare_alias_info_for_tensor_construction(
|
| 998 |
+
storage_info, metadata
|
| 999 |
+
)
|
| 1000 |
+
|
| 1001 |
+
if isinstance(storage, UntypedStorage) or storage is None:
|
| 1002 |
+
out = self._reconstruct_from_tensor_metadata(metadata, storage)
|
| 1003 |
+
else:
|
| 1004 |
+
assert isinstance(storage, int)
|
| 1005 |
+
out = self._reconstruct_from_tensor_metadata(
|
| 1006 |
+
metadata, cast(torch.Tensor, outputs[storage]).untyped_storage()
|
| 1007 |
+
)
|
| 1008 |
+
|
| 1009 |
+
outputs.append(out)
|
| 1010 |
+
w = self.outputs_weakrefs[i]
|
| 1011 |
+
assert w is not None
|
| 1012 |
+
w.swap_weakref(out.untyped_storage()._weak_ref())
|
| 1013 |
+
|
| 1014 |
+
return outputs
|
| 1015 |
+
|
| 1016 |
+
def prepare_alias_info_for_tensor_construction(
|
| 1017 |
+
self,
|
| 1018 |
+
out_alias_info: Optional[OutputAliasInfo],
|
| 1019 |
+
metadata: Union[Dict[str, Any], int, None],
|
| 1020 |
+
) -> Union[UntypedStorage, None, int]:
|
| 1021 |
+
if (
|
| 1022 |
+
isinstance(metadata, (int, type(None)))
|
| 1023 |
+
or out_alias_info is UnaliasedStorage
|
| 1024 |
+
):
|
| 1025 |
+
return None
|
| 1026 |
+
|
| 1027 |
+
if isinstance(out_alias_info, AliasesPriorGraphOutput):
|
| 1028 |
+
depth, existing_output_index = out_alias_info.index
|
| 1029 |
+
ref = self.path_weakrefs[depth][existing_output_index]
|
| 1030 |
+
assert ref is not None
|
| 1031 |
+
return torch.UntypedStorage._new_with_weak_ptr(ref())
|
| 1032 |
+
|
| 1033 |
+
assert isinstance(out_alias_info, AliasesNewOutput)
|
| 1034 |
+
return out_alias_info.index
|
| 1035 |
+
|
| 1036 |
+
def prepare_storages_for_construction(
|
| 1037 |
+
self,
|
| 1038 |
+
) -> List[Union[UntypedStorage, None, int]]:
|
| 1039 |
+
output_storages = []
|
| 1040 |
+
for output_storage_alias, metadata in zip(
|
| 1041 |
+
self.output_storage_alias, self.outputs_metadata
|
| 1042 |
+
):
|
| 1043 |
+
output_storages.append(
|
| 1044 |
+
self.prepare_alias_info_for_tensor_construction(
|
| 1045 |
+
output_storage_alias, metadata
|
| 1046 |
+
)
|
| 1047 |
+
)
|
| 1048 |
+
|
| 1049 |
+
return output_storages
|
| 1050 |
+
|
| 1051 |
+
def run_graph(self):
|
| 1052 |
+
assert self.graph is not None
|
| 1053 |
+
self.graph.replay()
|
| 1054 |
+
|
| 1055 |
+
def all_outputs_are_dead(self):
|
| 1056 |
+
"All outputs of the path from this node to its root are dead"
|
| 1057 |
+
for depth, output_index in self.live_indices_after_graph:
|
| 1058 |
+
if is_live(self.path_weakrefs[depth][output_index]):
|
| 1059 |
+
return False
|
| 1060 |
+
return True
|
| 1061 |
+
|
| 1062 |
+
def _record(self, model, inputs):
|
| 1063 |
+
"Record the model"
|
| 1064 |
+
|
| 1065 |
+
def static_input_iter():
|
| 1066 |
+
for i in self.wrapped_function.static_input_idxs:
|
| 1067 |
+
if isinstance(
|
| 1068 |
+
inputs[i], torch.Tensor
|
| 1069 |
+
) and not self._is_cuda_graph_recorded_tensor(inputs[i]):
|
| 1070 |
+
yield inputs[i]
|
| 1071 |
+
|
| 1072 |
+
# see: output_is_alias_of_persistent_static_inputs above
|
| 1073 |
+
static_input_persistent_storage_ptrs: Dict[int, StorageWeakRefWrapper] = {
|
| 1074 |
+
inp.untyped_storage().data_ptr(): StorageWeakRefWrapper(inp)
|
| 1075 |
+
for inp in itertools.chain(
|
| 1076 |
+
static_input_iter(), self.wrapped_function.constants
|
| 1077 |
+
)
|
| 1078 |
+
}
|
| 1079 |
+
|
| 1080 |
+
if config.triton.slow_path_cudagraph_asserts:
|
| 1081 |
+
# need to use parent live weakrefs because live_indices isnt set yet
|
| 1082 |
+
memory = (
|
| 1083 |
+
[] if self.parent is None else list(self.parent.path_live_weakrefs())
|
| 1084 |
+
)
|
| 1085 |
+
memory += [
|
| 1086 |
+
StorageWeakRefWrapper(elem)
|
| 1087 |
+
for i, elem in enumerate(inputs)
|
| 1088 |
+
if isinstance(elem, torch.Tensor)
|
| 1089 |
+
and i not in self.wrapped_function.static_input_idxs
|
| 1090 |
+
and elem.untyped_storage().data_ptr() != 0
|
| 1091 |
+
]
|
| 1092 |
+
check_memory_pool(self.device, self.cuda_graphs_pool, memory)
|
| 1093 |
+
|
| 1094 |
+
with preserve_rng_state(), torch.cuda.device(
|
| 1095 |
+
self.device
|
| 1096 |
+
), clear_cublas_manager(), torch.cuda.graph(
|
| 1097 |
+
self.graph,
|
| 1098 |
+
stream=self.stream,
|
| 1099 |
+
pool=self.cuda_graphs_pool,
|
| 1100 |
+
capture_error_mode="thread_local",
|
| 1101 |
+
), get_history_recording():
|
| 1102 |
+
static_outputs = model(inputs)
|
| 1103 |
+
|
| 1104 |
+
# running model should reclaim memory
|
| 1105 |
+
assert len(inputs) == 0
|
| 1106 |
+
|
| 1107 |
+
if not isinstance(static_outputs, (list, tuple)):
|
| 1108 |
+
static_outputs = (static_outputs,)
|
| 1109 |
+
|
| 1110 |
+
self._add_first_outputs(static_outputs, static_input_persistent_storage_ptrs)
|
| 1111 |
+
|
| 1112 |
+
return static_outputs
|
| 1113 |
+
|
| 1114 |
+
def _add_first_outputs(
|
| 1115 |
+
self,
|
| 1116 |
+
outputs,
|
| 1117 |
+
static_input_persistent_storage_ptrs: Dict[int, StorageWeakRefWrapper],
|
| 1118 |
+
):
|
| 1119 |
+
"Add the outputs from the first invocation of the node and set up metadata"
|
| 1120 |
+
|
| 1121 |
+
# getting liveness before we have added the outputs to path, so the length
|
| 1122 |
+
# of the two lists is equal
|
| 1123 |
+
prev_liveness = self.recorded_liveness_before_graph
|
| 1124 |
+
curr_liveness = self._get_liveness(self.path_weakrefs)
|
| 1125 |
+
|
| 1126 |
+
delta = self._get_different_indices(prev_liveness, curr_liveness)
|
| 1127 |
+
self.expected_dead_indices_after_graph = delta
|
| 1128 |
+
|
| 1129 |
+
assert len(self.outputs_weakrefs) == 0
|
| 1130 |
+
# index from data pointer to index in outputs
|
| 1131 |
+
output_new_storages_index: Dict[StorageDataPtr, int] = {}
|
| 1132 |
+
|
| 1133 |
+
self.unaliased_in_all_paths = [False for _ in range(len(outputs))]
|
| 1134 |
+
self.static_output_tensors = [None for _ in range(len(outputs))]
|
| 1135 |
+
|
| 1136 |
+
for i, o in enumerate(outputs):
|
| 1137 |
+
if o is None or not isinstance(o, torch.Tensor):
|
| 1138 |
+
self.output_storage_alias.append(UnaliasedStorage)
|
| 1139 |
+
continue
|
| 1140 |
+
|
| 1141 |
+
torch._check(
|
| 1142 |
+
o.is_cuda or o.untyped_storage().data_ptr() == 0,
|
| 1143 |
+
lambda: (
|
| 1144 |
+
"Expected all cuda outputs in cuda graph recording. Non cuda output "
|
| 1145 |
+
f"from {self.stack_traces[i] if self.stack_traces else '(unknown)'}"
|
| 1146 |
+
),
|
| 1147 |
+
),
|
| 1148 |
+
|
| 1149 |
+
ref = static_input_persistent_storage_ptrs.get(
|
| 1150 |
+
o.untyped_storage().data_ptr(), None
|
| 1151 |
+
)
|
| 1152 |
+
# also treat empty storages as static outputs because we do not need to manage their lifetime
|
| 1153 |
+
# and they should not participate in checkpointing
|
| 1154 |
+
is_empty_storage = o.untyped_storage().data_ptr() == 0
|
| 1155 |
+
if (ref and ref() is not None) or is_empty_storage:
|
| 1156 |
+
self.output_storage_alias.append(None)
|
| 1157 |
+
self.static_output_tensors[i] = o
|
| 1158 |
+
continue
|
| 1159 |
+
|
| 1160 |
+
path_ref = self._is_alias_of_live_recorded_tensor(o)
|
| 1161 |
+
if path_ref is not None:
|
| 1162 |
+
self._mark_prior_graph_output_as_aliased(path_ref)
|
| 1163 |
+
self.output_storage_alias.append(AliasesPriorGraphOutput(path_ref))
|
| 1164 |
+
continue
|
| 1165 |
+
|
| 1166 |
+
if o.untyped_storage().data_ptr() in output_new_storages_index:
|
| 1167 |
+
index = output_new_storages_index[o.untyped_storage().data_ptr()]
|
| 1168 |
+
self.unaliased_in_all_paths[index] = False
|
| 1169 |
+
self.output_storage_alias.append(AliasesNewOutput(index))
|
| 1170 |
+
continue
|
| 1171 |
+
|
| 1172 |
+
output_new_storages_index[o.untyped_storage().data_ptr()] = i
|
| 1173 |
+
self.output_storage_alias.append(UnaliasedStorage)
|
| 1174 |
+
self.unaliased_in_all_paths[i] = True
|
| 1175 |
+
|
| 1176 |
+
if self.stack_traces is None:
|
| 1177 |
+
self.stack_traces = [None for _ in range(len(outputs))]
|
| 1178 |
+
else:
|
| 1179 |
+
assert len(self.stack_traces) == len(
|
| 1180 |
+
outputs
|
| 1181 |
+
), "Wrong number of stack traces passed in"
|
| 1182 |
+
|
| 1183 |
+
assert not self.outputs_weakrefs
|
| 1184 |
+
for out, static_output_tensor in zip(outputs, self.static_output_tensors):
|
| 1185 |
+
if not isinstance(out, torch.Tensor) or static_output_tensor is not None:
|
| 1186 |
+
self.outputs_weakrefs.append(None)
|
| 1187 |
+
self.tensor_weakrefs.append(None)
|
| 1188 |
+
else:
|
| 1189 |
+
self.outputs_weakrefs.append(StorageWeakRefWrapper(out))
|
| 1190 |
+
self.tensor_weakrefs.append(TensorWeakRef(out))
|
| 1191 |
+
|
| 1192 |
+
self.recorded_liveness_after_graph = self._get_liveness(self.path_weakrefs)
|
| 1193 |
+
self.checkpointed_caching_state = torch._C._cuda_getCheckpointState(
|
| 1194 |
+
self.device, self.cuda_graphs_pool
|
| 1195 |
+
)
|
| 1196 |
+
|
| 1197 |
+
# now, get liveness with outputs added
|
| 1198 |
+
for depth in range(len(self.path_weakrefs)):
|
| 1199 |
+
for output_index in range(len(self.path_weakrefs[depth])):
|
| 1200 |
+
if is_live(self.path_weakrefs[depth][output_index]):
|
| 1201 |
+
self.live_indices_after_graph.append((depth, output_index))
|
| 1202 |
+
|
| 1203 |
+
self.debug_check_invariants_after_invocation()
|
| 1204 |
+
if config.triton.slow_path_cudagraph_asserts:
|
| 1205 |
+
check_memory_pool(
|
| 1206 |
+
self.device, self.cuda_graphs_pool, list(self.path_live_weakrefs())
|
| 1207 |
+
)
|
| 1208 |
+
|
| 1209 |
+
def _mark_prior_graph_output_as_aliased(self, index: PathOutputIndex):
|
| 1210 |
+
"Remove a graph output from the unaliased, cached tensors in an ancestor node"
|
| 1211 |
+
depth, output_index = index
|
| 1212 |
+
node = list(self._path_from_root)[depth]
|
| 1213 |
+
node.unaliased_in_all_paths[output_index] = False
|
| 1214 |
+
x = self.path_weakrefs[depth][output_index]
|
| 1215 |
+
assert x is not None
|
| 1216 |
+
x.remove_extra_reference()
|
| 1217 |
+
|
| 1218 |
+
def _initialize_cached_tensors(self):
|
| 1219 |
+
# we should not be clearing output_weakrefs, and they should be set in the first
|
| 1220 |
+
# record run
|
| 1221 |
+
assert len(self.outputs_weakrefs) == len(self.outputs_metadata)
|
| 1222 |
+
|
| 1223 |
+
for i, (storage_info, metadata, make_cached) in enumerate(
|
| 1224 |
+
zip(
|
| 1225 |
+
self.output_storage_alias,
|
| 1226 |
+
self.outputs_metadata,
|
| 1227 |
+
self.unaliased_in_all_paths,
|
| 1228 |
+
)
|
| 1229 |
+
):
|
| 1230 |
+
if not make_cached:
|
| 1231 |
+
self.cached_tensor_outputs.append(None)
|
| 1232 |
+
continue
|
| 1233 |
+
|
| 1234 |
+
assert storage_info is UnaliasedStorage
|
| 1235 |
+
assert isinstance(metadata, dict)
|
| 1236 |
+
s = self.create_storage(metadata)
|
| 1237 |
+
out = self._reconstruct_from_tensor_metadata(metadata, storage=s)
|
| 1238 |
+
|
| 1239 |
+
# XXX: let autograd know that there will be an additional reference to the tensor
|
| 1240 |
+
# that can be ignored when deciding whether to do gradient buffer inplacing.
|
| 1241 |
+
# Otherwise, inplacing could differ between tracing and subsequent execution.
|
| 1242 |
+
# For some models we tested this led to inputs no longer being in cudagraph pools,
|
| 1243 |
+
# leading to spurious re-recordings.
|
| 1244 |
+
# It also tells AMP cache that even though the tensor impls cannot be cached
|
| 1245 |
+
# in dtype conversions.
|
| 1246 |
+
|
| 1247 |
+
torch._C._add_cached_tensor(out)
|
| 1248 |
+
|
| 1249 |
+
self_ref = weakref.ref(self)
|
| 1250 |
+
|
| 1251 |
+
# one reference in our array, and calling sys.getrefcount bumps the refcount by one
|
| 1252 |
+
def check_refcount(i):
|
| 1253 |
+
self_loc = self_ref()
|
| 1254 |
+
if self_loc is None:
|
| 1255 |
+
return False
|
| 1256 |
+
return self_loc.get_output_refcount(i) == 2
|
| 1257 |
+
|
| 1258 |
+
check = functools.partial(check_refcount, i=i)
|
| 1259 |
+
|
| 1260 |
+
self.outputs_weakrefs[i] = StorageWeakRefWrapper(out, extra_ref_check=check)
|
| 1261 |
+
self.cached_tensor_outputs.append(out)
|
| 1262 |
+
|
| 1263 |
+
def get_output_refcount(self, index):
|
| 1264 |
+
return sys.getrefcount(self.cached_tensor_outputs[index])
|
| 1265 |
+
|
| 1266 |
+
@property
|
| 1267 |
+
def parent(self):
|
| 1268 |
+
"unwraps the weakref to _parent"
|
| 1269 |
+
return self._parent() if self._parent is not None else None
|
| 1270 |
+
|
| 1271 |
+
@property
|
| 1272 |
+
def _path_to_root(self):
|
| 1273 |
+
"Returns all nodes in the path starting at self and ending at root"
|
| 1274 |
+
node = self
|
| 1275 |
+
while node:
|
| 1276 |
+
yield node
|
| 1277 |
+
node = node.parent
|
| 1278 |
+
|
| 1279 |
+
@property
|
| 1280 |
+
def _path_from_root(self):
|
| 1281 |
+
"Returns all nodes in the path starting at the root and ending at self"
|
| 1282 |
+
nodes = reversed(list(self._path_to_root))
|
| 1283 |
+
yield from nodes
|
| 1284 |
+
|
| 1285 |
+
def _is_cuda_graph_recorded_tensor(self, t: torch.Tensor):
|
| 1286 |
+
"Is this tensor an output of a node in this path"
|
| 1287 |
+
for output_refs in self.path_weakrefs:
|
| 1288 |
+
for storage_weak_ref in output_refs:
|
| 1289 |
+
if storage_weak_ref is None:
|
| 1290 |
+
continue
|
| 1291 |
+
# don't need to check liveness of storage since the cuda graph managed
|
| 1292 |
+
# memory is never released.
|
| 1293 |
+
data_ptr = storage_weak_ref.data_ptr()
|
| 1294 |
+
if t.untyped_storage().data_ptr() == data_ptr:
|
| 1295 |
+
return True
|
| 1296 |
+
|
| 1297 |
+
return False
|
| 1298 |
+
|
| 1299 |
+
def _is_alias_of_live_recorded_tensor(
|
| 1300 |
+
self, t: torch.Tensor
|
| 1301 |
+
) -> Optional[PathOutputIndex]:
|
| 1302 |
+
for depth, output_refs in enumerate(self.path_weakrefs):
|
| 1303 |
+
for output_index, storage_ref in enumerate(output_refs):
|
| 1304 |
+
if (storage_and_ptr := maybe_deref(storage_ref)) is not None:
|
| 1305 |
+
storage, ptr = storage_and_ptr
|
| 1306 |
+
if ptr == t.untyped_storage().data_ptr():
|
| 1307 |
+
return (depth, output_index)
|
| 1308 |
+
|
| 1309 |
+
return None
|
| 1310 |
+
|
| 1311 |
+
@staticmethod
|
| 1312 |
+
def _check_liveness(
|
| 1313 |
+
indices: List[PathOutputIndex],
|
| 1314 |
+
output_refs: List[List[Optional[StorageWeakRefWrapper]]],
|
| 1315 |
+
):
|
| 1316 |
+
"Check that all of the indices specified are dead references"
|
| 1317 |
+
for depth, output_index in indices:
|
| 1318 |
+
w = output_refs[depth][output_index]
|
| 1319 |
+
assert w is not None
|
| 1320 |
+
if w() is not None:
|
| 1321 |
+
return False
|
| 1322 |
+
return True
|
| 1323 |
+
|
| 1324 |
+
def add_child(self, function_id: FunctionID, node: CUDAGraphNode):
|
| 1325 |
+
"Adds node as a a child of self"
|
| 1326 |
+
self.children[function_id].append(node)
|
| 1327 |
+
|
| 1328 |
+
@staticmethod
|
| 1329 |
+
def _get_different_indices(
|
| 1330 |
+
prev: List[List[bool]], curr: List[List[bool]]
|
| 1331 |
+
) -> List[PathOutputIndex]:
|
| 1332 |
+
"Find indices where the two lists differ."
|
| 1333 |
+
dead_indices = []
|
| 1334 |
+
assert len(prev) <= len(curr)
|
| 1335 |
+
for i, (outputs1, outputs2) in enumerate(zip(prev, curr)):
|
| 1336 |
+
assert len(outputs1) == len(outputs2)
|
| 1337 |
+
for j, (output1, output2) in enumerate(zip(outputs1, outputs2)):
|
| 1338 |
+
if output1 != output2:
|
| 1339 |
+
dead_indices.append((i, j))
|
| 1340 |
+
|
| 1341 |
+
return dead_indices
|
| 1342 |
+
|
| 1343 |
+
@staticmethod
|
| 1344 |
+
def _get_liveness(
|
| 1345 |
+
weakrefs: List[List[Optional[StorageWeakRefWrapper]]],
|
| 1346 |
+
) -> List[List[bool]]:
|
| 1347 |
+
"Maps weakrefs to true if the reference is alive and false otherwise"
|
| 1348 |
+
if len(weakrefs) == 0:
|
| 1349 |
+
return []
|
| 1350 |
+
|
| 1351 |
+
return [pytree.tree_map(is_live, outputs) for outputs in weakrefs]
|
| 1352 |
+
|
| 1353 |
+
def debug_assert_invariants(
|
| 1354 |
+
self, expected_liveness: List[List[bool]], newly_dead: List[PathOutputIndex]
|
| 1355 |
+
):
|
| 1356 |
+
if not config.triton.fast_path_cudagraph_asserts:
|
| 1357 |
+
return
|
| 1358 |
+
|
| 1359 |
+
for i, node in enumerate(self._path_from_root):
|
| 1360 |
+
assert self.path_weakrefs[i] is node.outputs_weakrefs
|
| 1361 |
+
|
| 1362 |
+
nodes = list(self._path_from_root)
|
| 1363 |
+
|
| 1364 |
+
live_blocks = get_block_addrs(self.cuda_graphs_pool)
|
| 1365 |
+
|
| 1366 |
+
live_storage_data_ptrs = set()
|
| 1367 |
+
live_storage_weak_ptrs = set()
|
| 1368 |
+
|
| 1369 |
+
for depth, outputs_liveness in enumerate(expected_liveness):
|
| 1370 |
+
for output_idx, output_liveness in enumerate(outputs_liveness):
|
| 1371 |
+
# tensor can die early, but it can't be alive when it should be dead
|
| 1372 |
+
w = self.path_weakrefs[depth][output_idx]
|
| 1373 |
+
if (stor_weak_ptr_and_data_ptr := maybe_deref(w)) is not None:
|
| 1374 |
+
assert output_liveness
|
| 1375 |
+
stor_weak_ptr, stor_data_ptr = stor_weak_ptr_and_data_ptr
|
| 1376 |
+
assert (stor_data_ptr in live_storage_data_ptrs) == (
|
| 1377 |
+
stor_weak_ptr in live_storage_weak_ptrs
|
| 1378 |
+
)
|
| 1379 |
+
live_storage_data_ptrs.add(stor_data_ptr)
|
| 1380 |
+
live_storage_weak_ptrs.add(stor_weak_ptr)
|
| 1381 |
+
|
| 1382 |
+
is_persistent_alias = (
|
| 1383 |
+
nodes[depth].static_output_tensors[output_idx] is not None
|
| 1384 |
+
)
|
| 1385 |
+
|
| 1386 |
+
if is_persistent_alias:
|
| 1387 |
+
assert stor_data_ptr not in live_blocks
|
| 1388 |
+
|
| 1389 |
+
for depth, output_index in newly_dead:
|
| 1390 |
+
assert not is_live(self.path_weakrefs[depth][output_index])
|
| 1391 |
+
|
| 1392 |
+
def debug_check_invariants_before_invocation(self):
|
| 1393 |
+
self.debug_assert_invariants(
|
| 1394 |
+
self.recorded_liveness_before_graph, self.expected_dead_indices_before_graph
|
| 1395 |
+
)
|
| 1396 |
+
|
| 1397 |
+
def debug_check_invariants_after_invocation(self):
|
| 1398 |
+
self.debug_assert_invariants(
|
| 1399 |
+
self.recorded_liveness_before_graph, self.expected_dead_indices_after_graph
|
| 1400 |
+
)
|
| 1401 |
+
|
| 1402 |
+
def data_ptrs_dead_since_invocation(self) -> List[int]:
|
| 1403 |
+
"""
|
| 1404 |
+
Since this node was invoked, return data ptrs of all tensor outputs that have died
|
| 1405 |
+
in the current executing tree path.
|
| 1406 |
+
"""
|
| 1407 |
+
curr_liveness = self._get_liveness(self.path_weakrefs)
|
| 1408 |
+
_get_different_indices = self._get_different_indices(
|
| 1409 |
+
self.recorded_liveness_after_graph, curr_liveness
|
| 1410 |
+
)
|
| 1411 |
+
|
| 1412 |
+
path = list(self._path_from_root)
|
| 1413 |
+
ptrs_to_deallocate = []
|
| 1414 |
+
for depth, output_index in _get_different_indices:
|
| 1415 |
+
ptrs_to_deallocate.append(
|
| 1416 |
+
path[depth].outputs_metadata[output_index]["data_ptr"]
|
| 1417 |
+
)
|
| 1418 |
+
|
| 1419 |
+
return ptrs_to_deallocate
|
| 1420 |
+
|
| 1421 |
+
def path_live_weakrefs(self) -> Iterator[StorageWeakRefWrapper]:
|
| 1422 |
+
for i, j in self.live_indices_after_graph:
|
| 1423 |
+
out = self.path_weakrefs[i][j]
|
| 1424 |
+
if out is not None and is_live(out):
|
| 1425 |
+
yield out
|
| 1426 |
+
|
| 1427 |
+
def remove_node_cached_tensors(self):
|
| 1428 |
+
for t in self.cached_tensor_outputs:
|
| 1429 |
+
if t is not None:
|
| 1430 |
+
torch._C._remove_cached_tensor(t)
|
| 1431 |
+
self.cached_tensor_outputs.clear()
|
| 1432 |
+
|
| 1433 |
+
for i, unaliased in enumerate(self.unaliased_in_all_paths):
|
| 1434 |
+
if unaliased:
|
| 1435 |
+
n = self.outputs_weakrefs[i]
|
| 1436 |
+
assert n is not None
|
| 1437 |
+
n.remove_extra_reference()
|
| 1438 |
+
|
| 1439 |
+
def remove_path_cached_tensors(self):
|
| 1440 |
+
for node in self._path_from_root:
|
| 1441 |
+
node.remove_node_cached_tensors()
|
| 1442 |
+
|
| 1443 |
+
def clear_path_state(self):
|
| 1444 |
+
"Clear the path state in this current executing node"
|
| 1445 |
+
# this doesnt actually do anything right now, leaving it as placeholder
|
| 1446 |
+
pass
|
| 1447 |
+
|
| 1448 |
+
@staticmethod
|
| 1449 |
+
def _tensor_metadata(x, ignore_storage_offset=True):
|
| 1450 |
+
assert isinstance(x, torch.Tensor)
|
| 1451 |
+
# We ignore the storage offset for inputs, but not for outputs
|
| 1452 |
+
# TODO: - should we make the storage resizable ?
|
| 1453 |
+
return {
|
| 1454 |
+
"nbytes": x.untyped_storage().nbytes(),
|
| 1455 |
+
"data_ptr": x.untyped_storage().data_ptr(),
|
| 1456 |
+
"size": x.shape,
|
| 1457 |
+
"stride": x.stride(),
|
| 1458 |
+
"dtype": x.dtype,
|
| 1459 |
+
"device": x.device,
|
| 1460 |
+
"storage_offset": x.storage_offset() if not ignore_storage_offset else 0,
|
| 1461 |
+
}
|
| 1462 |
+
|
| 1463 |
+
def _reconstruct_from_tensor_metadata(
|
| 1464 |
+
self, metadata: Dict[str, Any], storage=None
|
| 1465 |
+
) -> Tensor:
|
| 1466 |
+
s = self.create_storage(metadata) if storage is None else storage
|
| 1467 |
+
return torch._C._construct_CUDA_Tensor_From_Storage_And_Metadata(metadata, s)
|
| 1468 |
+
|
| 1469 |
+
def create_storage(self, metadata):
|
| 1470 |
+
return torch._C._construct_storage_from_data_pointer(
|
| 1471 |
+
metadata["data_ptr"], metadata["device"], metadata["nbytes"]
|
| 1472 |
+
)
|
| 1473 |
+
|
| 1474 |
+
def _allocate_and_copy_recording_inputs(
|
| 1475 |
+
self, inputs
|
| 1476 |
+
) -> List[Union[torch.Tensor, int]]:
|
| 1477 |
+
"""
|
| 1478 |
+
Allocate inputs for non static, non cudagraph managraphed managed tensors in the memory pool
|
| 1479 |
+
and copy over the tensor values.
|
| 1480 |
+
"""
|
| 1481 |
+
|
| 1482 |
+
torch.cuda.synchronize()
|
| 1483 |
+
self.stream.wait_stream(torch.cuda.current_stream())
|
| 1484 |
+
recording_inputs: List[Union[Tensor, int]] = []
|
| 1485 |
+
|
| 1486 |
+
with warnings.catch_warnings(record=True), torch.cuda.device(
|
| 1487 |
+
self.device
|
| 1488 |
+
), _use_cuda_memory_pool_manager(
|
| 1489 |
+
self.device,
|
| 1490 |
+
mem_pool=self.cuda_graphs_pool,
|
| 1491 |
+
stream=self.stream,
|
| 1492 |
+
):
|
| 1493 |
+
for i, inp in enumerate(inputs):
|
| 1494 |
+
if not isinstance(inp, torch.Tensor):
|
| 1495 |
+
assert isinstance(inp, int)
|
| 1496 |
+
recording_inputs.append(inp)
|
| 1497 |
+
elif i not in self.static_input_idxs:
|
| 1498 |
+
# static_input does an allocation!
|
| 1499 |
+
recording_inputs.append(static_input(inp))
|
| 1500 |
+
# copy over and clear non recording input
|
| 1501 |
+
self._copy_input(i, recording_inputs[-1], inp)
|
| 1502 |
+
inputs[i] = None
|
| 1503 |
+
del inp
|
| 1504 |
+
else:
|
| 1505 |
+
recording_inputs.append(inp)
|
| 1506 |
+
|
| 1507 |
+
return recording_inputs
|
| 1508 |
+
|
| 1509 |
+
def check_invariants(self, inputs: List[Tensor]) -> bool:
|
| 1510 |
+
"""
|
| 1511 |
+
Checks if this node can be run. The same pattern of tensor liveness and tensors
|
| 1512 |
+
managed in the cudagraph private pool must remain stable.
|
| 1513 |
+
"""
|
| 1514 |
+
|
| 1515 |
+
# previously managed data pointers remain stable
|
| 1516 |
+
for idx in self.cudagraph_managed_idxs:
|
| 1517 |
+
if inputs[idx].data_ptr() != self.static_input_data_ptrs[idx]:
|
| 1518 |
+
return False
|
| 1519 |
+
|
| 1520 |
+
if not self._check_liveness(
|
| 1521 |
+
self.expected_dead_indices_before_graph, self.path_weakrefs
|
| 1522 |
+
):
|
| 1523 |
+
return False
|
| 1524 |
+
|
| 1525 |
+
# the cudagraph managed tensors which died upon recording must also die upon
|
| 1526 |
+
# this invocation. it is too late to check after we've replayed the graph,
|
| 1527 |
+
# because we would have already written over their memory.
|
| 1528 |
+
for idx in self.cudagraph_managed_idxs:
|
| 1529 |
+
inputs[idx] = None # type: ignore[call-overload]
|
| 1530 |
+
|
| 1531 |
+
torch._check(
|
| 1532 |
+
self._check_liveness(
|
| 1533 |
+
self.expected_dead_indices_after_graph, self.path_weakrefs
|
| 1534 |
+
),
|
| 1535 |
+
lambda: "TODO: graph recording observed an input tensor deallocate during graph "
|
| 1536 |
+
" recording that did not occur during replay. Please file an issue.",
|
| 1537 |
+
)
|
| 1538 |
+
return True
|
| 1539 |
+
|
| 1540 |
+
def num_descendants(self) -> int:
|
| 1541 |
+
"Total number of descendents of this node"
|
| 1542 |
+
num_desc = 0
|
| 1543 |
+
for children in self.children.values():
|
| 1544 |
+
for child in children:
|
| 1545 |
+
num_desc += 1
|
| 1546 |
+
num_desc += child.num_descendants()
|
| 1547 |
+
return num_desc
|
| 1548 |
+
|
| 1549 |
+
|
| 1550 |
+
def get_cudagraph_segments(pool_id):
|
| 1551 |
+
segments = torch.cuda.memory_snapshot()
|
| 1552 |
+
return [segment for segment in segments if segment["segment_pool_id"] == pool_id]
|
| 1553 |
+
|
| 1554 |
+
|
| 1555 |
+
def get_block_addrs(pool_id, live_only=True):
|
| 1556 |
+
blocks = []
|
| 1557 |
+
|
| 1558 |
+
for segment in get_cudagraph_segments(pool_id):
|
| 1559 |
+
addr = segment["address"]
|
| 1560 |
+
for block in segment["blocks"]:
|
| 1561 |
+
if block["state"] == "active_allocated" or not live_only:
|
| 1562 |
+
blocks.append(addr)
|
| 1563 |
+
|
| 1564 |
+
addr += block["size"]
|
| 1565 |
+
|
| 1566 |
+
return blocks
|
| 1567 |
+
|
| 1568 |
+
|
| 1569 |
+
def format_tb(frames):
|
| 1570 |
+
formatted_traceback = []
|
| 1571 |
+
|
| 1572 |
+
for entry in frames:
|
| 1573 |
+
formatted_traceback.append(
|
| 1574 |
+
traceback.FrameSummary(entry["filename"], entry["line"], entry["name"])
|
| 1575 |
+
)
|
| 1576 |
+
|
| 1577 |
+
return "".join(traceback.format_list(formatted_traceback))
|
| 1578 |
+
|
| 1579 |
+
|
| 1580 |
+
def check_memory_pool(device, pool_id, live_storages_ptrs: List[StorageWeakRefWrapper]):
|
| 1581 |
+
assert all(
|
| 1582 |
+
isinstance(elem, StorageWeakRefWrapper) for elem in live_storages_ptrs
|
| 1583 |
+
) # noqa: C419
|
| 1584 |
+
unique_storages = {stor.data_ptr() for stor in live_storages_ptrs if stor()}
|
| 1585 |
+
|
| 1586 |
+
# check if there is a divergence first, then do the expensive snapshot call after
|
| 1587 |
+
# we know it will error
|
| 1588 |
+
if torch._C._cuda_checkPoolLiveAllocations(device, pool_id, unique_storages):
|
| 1589 |
+
return
|
| 1590 |
+
|
| 1591 |
+
# at this point we are past the fast-path. we have seen rare cases where a dead tensor is dead,
|
| 1592 |
+
# but hasn't been gc'd yet, and gives false positive for allocated_not_in_live_storages
|
| 1593 |
+
gc.collect()
|
| 1594 |
+
|
| 1595 |
+
segments = get_cudagraph_segments(pool_id)
|
| 1596 |
+
|
| 1597 |
+
allocated_not_in_live_storages = {}
|
| 1598 |
+
|
| 1599 |
+
for segment in segments:
|
| 1600 |
+
addr = segment["address"]
|
| 1601 |
+
for block in segment["blocks"]:
|
| 1602 |
+
if block["state"] == "active_allocated":
|
| 1603 |
+
if addr not in unique_storages:
|
| 1604 |
+
allocated_not_in_live_storages[addr] = block
|
| 1605 |
+
else:
|
| 1606 |
+
unique_storages.remove(addr)
|
| 1607 |
+
|
| 1608 |
+
addr += block["size"]
|
| 1609 |
+
|
| 1610 |
+
torch._check(
|
| 1611 |
+
len(unique_storages) == 0,
|
| 1612 |
+
lambda: f"These storage data ptrs are not allocated in pool {pool_id} but should be {unique_storages}",
|
| 1613 |
+
)
|
| 1614 |
+
|
| 1615 |
+
if allocated_not_in_live_storages != 0:
|
| 1616 |
+
formatted = []
|
| 1617 |
+
for dp, block in allocated_not_in_live_storages.items():
|
| 1618 |
+
trace = format_tb(block.get("frames", []))
|
| 1619 |
+
formatted.append(f"Data Pointer: {dp}, history: \n{trace}")
|
| 1620 |
+
formatted_s = "\n".join(formatted)
|
| 1621 |
+
msg = (
|
| 1622 |
+
f"These live storage data ptrs are in the cudagraph pool but not "
|
| 1623 |
+
f"accounted for as an output of cudagraph trees: \n\n{formatted_s}"
|
| 1624 |
+
)
|
| 1625 |
+
raise RuntimeError(msg)
|
| 1626 |
+
|
| 1627 |
+
|
| 1628 |
+
class ExecutionState(Enum):
|
| 1629 |
+
"""
|
| 1630 |
+
Represents the state of the CUDAGraph Tree. Will be None if there is no live current memory allocated
|
| 1631 |
+
in the cuda graph pool. Otherwise will reflect the state of the most recently executed node.
|
| 1632 |
+
"""
|
| 1633 |
+
|
| 1634 |
+
NONE = auto()
|
| 1635 |
+
WARMUP = auto()
|
| 1636 |
+
RECORDING = auto()
|
| 1637 |
+
EXECUTION = auto()
|
| 1638 |
+
|
| 1639 |
+
|
| 1640 |
+
class CompilationMode(Enum):
|
| 1641 |
+
FORWARD = auto()
|
| 1642 |
+
BACKWARD = auto()
|
| 1643 |
+
INFERENCE = auto()
|
| 1644 |
+
|
| 1645 |
+
|
| 1646 |
+
class CUDAGraphTreeManager:
|
| 1647 |
+
"""
|
| 1648 |
+
Groups individual recordings or executions of cuda graphs into a tree of recordings,
|
| 1649 |
+
and checks required invariants, and manages warmups of graphs.
|
| 1650 |
+
|
| 1651 |
+
When graphs are recorded in the same tree, it enforces subsequent execution
|
| 1652 |
+
to follow the same order and have the same output tensor livespans. To remove
|
| 1653 |
+
unnecessary coupling of cuda graphs (and additional imposed invariants),
|
| 1654 |
+
the tree manager will end a currently recording tree whenever it is valid - when
|
| 1655 |
+
the memory pool no longer has any live allocations.
|
| 1656 |
+
|
| 1657 |
+
We ignore outputs from a previous generation that correspond to prior model outputs.
|
| 1658 |
+
Currently this is hardcoded `GenerationTracker.generation` tracked in torch dynamo.
|
| 1659 |
+
# TODO: make generation increment configurable, warn on overwrite.
|
| 1660 |
+
|
| 1661 |
+
We run graph warmups in the cudagraph memory pool and return the result on the first invocation
|
| 1662 |
+
of a function. For many models it is important to reclaim activations as you run the backward.
|
| 1663 |
+
If we were to warm up the model and keep an extra copy of the inputs around to subsequently
|
| 1664 |
+
use for recording, we would incur a memory penalty. Additionally, if we are part way through training
|
| 1665 |
+
your model and need to recompile, memory will be allocated to the cuda graph pool, so we run this
|
| 1666 |
+
warmup run in the cuda graph memory pool. As for recording, warm up needs the state of live tensors
|
| 1667 |
+
to be accurately reflected so we checkpoint the allocator state if we need to warm up following graph
|
| 1668 |
+
replay.
|
| 1669 |
+
"""
|
| 1670 |
+
|
| 1671 |
+
def __init__(self, device_index: int):
|
| 1672 |
+
# roots are functions which have no dependencies on an other node. I.e.,
|
| 1673 |
+
# when they are first invoked, none of their inputs are outputs are outputs
|
| 1674 |
+
# of another node, nor are there any live outputs of another node whose
|
| 1675 |
+
# liveness would create a dependency.
|
| 1676 |
+
self.roots: Dict[FunctionID, List[CUDAGraphNode]] = defaultdict(list)
|
| 1677 |
+
|
| 1678 |
+
# mapping from function id to wrapped function
|
| 1679 |
+
self.ids_to_funcs: Dict[FunctionID, WrappedFunction] = {}
|
| 1680 |
+
|
| 1681 |
+
self.ids_to_stack_traces: Dict[FunctionID, StackTraces] = {}
|
| 1682 |
+
|
| 1683 |
+
self.warmed_up_functions: Set[FunctionID] = set()
|
| 1684 |
+
# if we fail to increment generation, and are stuck warming up,
|
| 1685 |
+
# only warn on each function once
|
| 1686 |
+
self.warned_functions: Set[FunctionID] = set()
|
| 1687 |
+
torch._C._set_cached_tensors_enabled(True)
|
| 1688 |
+
|
| 1689 |
+
# NB: cuda caching allocator will remember the stream a segment is allocated to
|
| 1690 |
+
# and only allocate that segment to the same stream. we need to use a single stream
|
| 1691 |
+
# for all allocations to the memory pool, otherwise the allocations to separate streams
|
| 1692 |
+
# will not be reused; separate recordings would have use the same memory pool, but not
|
| 1693 |
+
# the same memory.
|
| 1694 |
+
|
| 1695 |
+
with torch.cuda.device(device_index):
|
| 1696 |
+
torch.cuda.synchronize()
|
| 1697 |
+
self.stream = torch.cuda.Stream()
|
| 1698 |
+
self.stream.wait_stream(torch.cuda.current_stream())
|
| 1699 |
+
|
| 1700 |
+
# Keeps Memory Pool Alive
|
| 1701 |
+
self.graph: Optional[torch.cuda.CUDAGraph] = torch.cuda.CUDAGraph()
|
| 1702 |
+
self.cuda_graphs_thread_pool = torch.cuda.graph_pool_handle()
|
| 1703 |
+
|
| 1704 |
+
with warnings.catch_warnings(record=True), torch.cuda.graph(
|
| 1705 |
+
self.graph,
|
| 1706 |
+
pool=self.cuda_graphs_thread_pool,
|
| 1707 |
+
stream=self.stream,
|
| 1708 |
+
capture_error_mode="thread_local",
|
| 1709 |
+
):
|
| 1710 |
+
pass
|
| 1711 |
+
|
| 1712 |
+
self.graph_counter = itertools.count(0)
|
| 1713 |
+
self.func_counter = itertools.count(0)
|
| 1714 |
+
|
| 1715 |
+
# whether we the current node is in a state of warmup, recording, execution. If
|
| 1716 |
+
# there is no current node the state will be ExecutionState.None.
|
| 1717 |
+
self.path_state = ExecutionState.NONE
|
| 1718 |
+
self.device_index = device_index
|
| 1719 |
+
|
| 1720 |
+
# the most recently invoked cudagraph wrapping of a function. Will be None
|
| 1721 |
+
# when there is no output from a previous recording or execution whose memory
|
| 1722 |
+
# we need to respect in the cuda caching allocation. If you incremented generation,
|
| 1723 |
+
# this will also be none, as ignore those allocations.
|
| 1724 |
+
self.current_node: Optional[CUDAGraphNode] = None
|
| 1725 |
+
|
| 1726 |
+
# current generation of cudagraph invocations. when torch.compile is run
|
| 1727 |
+
# we increment the current generation. are willing to ignore live outputs
|
| 1728 |
+
# of a previous generation in checking liveness.
|
| 1729 |
+
self.current_gen: int = -1
|
| 1730 |
+
|
| 1731 |
+
# number of instances we are in execution and failed to match to an
|
| 1732 |
+
# existing child
|
| 1733 |
+
self.debug_fail_counter = 0
|
| 1734 |
+
# number of instances we had to checkpoint the function
|
| 1735 |
+
self.debug_checkpointing_counter = 0
|
| 1736 |
+
|
| 1737 |
+
self.id_to_mode: Dict[FunctionID, CompilationMode] = {}
|
| 1738 |
+
|
| 1739 |
+
# Note: [Backward Generation Handling]
|
| 1740 |
+
# We generally perform a sequence of forward executions followed by backward executions.
|
| 1741 |
+
# If multiple torch.compile wrapped forwards are executed with their backwards pending,
|
| 1742 |
+
# we should not disregard the outputs from a prior torch.compile since the entire training
|
| 1743 |
+
# loop hasn't completed. Occasionally, a backward pass corresponding to a forward pass may
|
| 1744 |
+
# not be executed, so we cannot wait for all pending forward pass backward completions, so
|
| 1745 |
+
# we cannot wait for all backwards to have been invoked. Instead we wait for a single backward
|
| 1746 |
+
# invocation. Triggering a backward pass typically doesn't lead to another torch.compile
|
| 1747 |
+
# invocation, making it less likely for the generation to increase between multiple
|
| 1748 |
+
# backward calls. The following use case is covered by this approach:
|
| 1749 |
+
# mod1 = torch.compile(...)
|
| 1750 |
+
# mod2 = torch.compile(...)
|
| 1751 |
+
# mod2(mod1(x)).sum().backward()
|
| 1752 |
+
|
| 1753 |
+
self.running_forwards_with_pending_backwards = False
|
| 1754 |
+
|
| 1755 |
+
def run(self, new_inputs: List[Tensor], function_id: FunctionID):
|
| 1756 |
+
assert self.graph is not None, "Running CUDAGraph after shutdown"
|
| 1757 |
+
out = self._run(new_inputs, function_id)
|
| 1758 |
+
|
| 1759 |
+
# The forwards are only pending following invocation, not before
|
| 1760 |
+
mode = self.id_to_mode[function_id]
|
| 1761 |
+
if mode == CompilationMode.FORWARD:
|
| 1762 |
+
self.running_forwards_with_pending_backwards = True
|
| 1763 |
+
elif mode == CompilationMode.BACKWARD:
|
| 1764 |
+
self.running_forwards_with_pending_backwards = False
|
| 1765 |
+
|
| 1766 |
+
return out
|
| 1767 |
+
|
| 1768 |
+
def set_to_running_backward(self):
|
| 1769 |
+
self.running_forwards_with_pending_backwards = False
|
| 1770 |
+
|
| 1771 |
+
def _run(self, new_inputs: List[Tensor], function_id: FunctionID):
|
| 1772 |
+
# we will try to end the current execution lazily, since
|
| 1773 |
+
# we dont want to do unnecessary checking of the existing outputs
|
| 1774 |
+
# on the hot path, but both recording and warmup only happen once
|
| 1775 |
+
# so we check up front
|
| 1776 |
+
if self.in_recording:
|
| 1777 |
+
self.try_end_curr_recording(function_id)
|
| 1778 |
+
|
| 1779 |
+
if self.in_warmup:
|
| 1780 |
+
self.try_end_curr_warmup(function_id)
|
| 1781 |
+
|
| 1782 |
+
# warming up a function and subsequentally recording may use different memory addresses
|
| 1783 |
+
# because both depend on the state of the caching allocator. if we warm up graph A,
|
| 1784 |
+
# then warm up graph B and make more allocations, the subsequent recording of A will not
|
| 1785 |
+
# necessarily use the same addresses as in the warm up. Thus any warm up of a node can only
|
| 1786 |
+
# be followed by warm up runs.
|
| 1787 |
+
if (
|
| 1788 |
+
not (
|
| 1789 |
+
function_id in self.warmed_up_functions
|
| 1790 |
+
or config.triton.skip_cudagraph_warmup
|
| 1791 |
+
)
|
| 1792 |
+
) or self.in_warmup:
|
| 1793 |
+
# If we are in the middle of executing cuda graphs, then we need to checkpoint memory state.
|
| 1794 |
+
# Both Recording and Warmup will be reflected in the allocator and dont need changes
|
| 1795 |
+
if self.path_state == ExecutionState.EXECUTION:
|
| 1796 |
+
self.apply_checkpoint_execution_state_in_allocator()
|
| 1797 |
+
|
| 1798 |
+
return self.run_eager(new_inputs, function_id)
|
| 1799 |
+
|
| 1800 |
+
child_nodes = (
|
| 1801 |
+
self.roots if self.current_node is None else self.current_node.children
|
| 1802 |
+
)
|
| 1803 |
+
|
| 1804 |
+
if not self.in_recording:
|
| 1805 |
+
for child in child_nodes[function_id]:
|
| 1806 |
+
# here we are checking memory consistency between recording and execution,
|
| 1807 |
+
# as well as things like stability of tensor locations, etc
|
| 1808 |
+
# and other
|
| 1809 |
+
if child.check_invariants(new_inputs):
|
| 1810 |
+
return self.execute_node(child, new_inputs)
|
| 1811 |
+
|
| 1812 |
+
# now that we know the new function can't be run as a child of the
|
| 1813 |
+
# current node, if it is a root, try to end the current execution.
|
| 1814 |
+
# as noted above, we want to do this lazily to avoid having to
|
| 1815 |
+
# check all existing outputs
|
| 1816 |
+
if self.current_node is not None and function_id in self.roots:
|
| 1817 |
+
self.try_end_curr_execution()
|
| 1818 |
+
|
| 1819 |
+
# run again to hit the root matching case which must succeed
|
| 1820 |
+
if self.current_node is None:
|
| 1821 |
+
return self.run(new_inputs, function_id)
|
| 1822 |
+
|
| 1823 |
+
# at this point, we necessarily will do a new recording
|
| 1824 |
+
self.debug_fail_counter += 1
|
| 1825 |
+
|
| 1826 |
+
self.try_end_curr_execution()
|
| 1827 |
+
if self.current_node is not None:
|
| 1828 |
+
self.apply_checkpoint_execution_state_in_allocator()
|
| 1829 |
+
|
| 1830 |
+
# now, we are in a recording state !
|
| 1831 |
+
return self.record_function(new_inputs, function_id)
|
| 1832 |
+
|
| 1833 |
+
def shutdown(self):
|
| 1834 |
+
"""
|
| 1835 |
+
Remove all cached tensors in all nodes. Because cached tensors can hold gradients which in turn
|
| 1836 |
+
might reference a backward which invokes a CUDA Graph Node, we have to manually clear them on shutdown
|
| 1837 |
+
to avoid a reference cycle.
|
| 1838 |
+
"""
|
| 1839 |
+
nodes = []
|
| 1840 |
+
for roots in self.roots.values():
|
| 1841 |
+
nodes.extend(roots)
|
| 1842 |
+
|
| 1843 |
+
while nodes:
|
| 1844 |
+
node = nodes.pop()
|
| 1845 |
+
for children in node.children.values():
|
| 1846 |
+
nodes.extend(children)
|
| 1847 |
+
node.remove_node_cached_tensors()
|
| 1848 |
+
node.graph = None
|
| 1849 |
+
|
| 1850 |
+
self.graph = None
|
| 1851 |
+
self.roots = None # type: ignore[assignment]
|
| 1852 |
+
self.current_node = None
|
| 1853 |
+
|
| 1854 |
+
def record_function(self, new_inputs, function_id) -> List[Optional[Tensor]]:
|
| 1855 |
+
graph_id = self.new_graph_id()
|
| 1856 |
+
log.debug(
|
| 1857 |
+
"Recording function %d of graph recording id %d",
|
| 1858 |
+
function_id.id,
|
| 1859 |
+
graph_id.id,
|
| 1860 |
+
)
|
| 1861 |
+
torch.cuda.synchronize()
|
| 1862 |
+
node = CUDAGraphNode(
|
| 1863 |
+
self.ids_to_funcs[function_id],
|
| 1864 |
+
graph_id,
|
| 1865 |
+
self.current_node,
|
| 1866 |
+
new_inputs,
|
| 1867 |
+
self.cuda_graphs_thread_pool,
|
| 1868 |
+
self.device_index,
|
| 1869 |
+
self.ids_to_stack_traces[function_id],
|
| 1870 |
+
self.stream,
|
| 1871 |
+
)
|
| 1872 |
+
if self.current_node is None:
|
| 1873 |
+
self.roots[function_id].append(node)
|
| 1874 |
+
else:
|
| 1875 |
+
self.current_node.add_child(function_id, node)
|
| 1876 |
+
self.current_node = node
|
| 1877 |
+
self.path_state = ExecutionState.RECORDING
|
| 1878 |
+
self.update_generation()
|
| 1879 |
+
torch.cuda.synchronize()
|
| 1880 |
+
return node.run_first_inputs(new_inputs)
|
| 1881 |
+
|
| 1882 |
+
def execute_node(self, node: CUDAGraphNode, new_inputs) -> List[Optional[Tensor]]:
|
| 1883 |
+
self.current_node = node
|
| 1884 |
+
self.path_state = ExecutionState.EXECUTION
|
| 1885 |
+
self.update_generation()
|
| 1886 |
+
return node.run(new_inputs)
|
| 1887 |
+
|
| 1888 |
+
def run_eager(self, new_inputs, function_id: FunctionID):
|
| 1889 |
+
# this is only stored on current node, because when we start a new path,
|
| 1890 |
+
# we will deallocate it
|
| 1891 |
+
already_warm = function_id in self.warmed_up_functions
|
| 1892 |
+
if not already_warm:
|
| 1893 |
+
log.debug("Running warmup of function %d", function_id.id)
|
| 1894 |
+
else:
|
| 1895 |
+
log.debug(
|
| 1896 |
+
"Running eager of function %d because ancestor needed to warm up",
|
| 1897 |
+
function_id.id,
|
| 1898 |
+
)
|
| 1899 |
+
self.warmed_up_functions.add(function_id)
|
| 1900 |
+
node = CUDAWarmupNode(
|
| 1901 |
+
self.ids_to_funcs[function_id],
|
| 1902 |
+
self.current_node,
|
| 1903 |
+
self.cuda_graphs_thread_pool,
|
| 1904 |
+
self.graph,
|
| 1905 |
+
self.device_index,
|
| 1906 |
+
self.ids_to_stack_traces[function_id],
|
| 1907 |
+
self.stream,
|
| 1908 |
+
already_warm,
|
| 1909 |
+
)
|
| 1910 |
+
self.current_node = node
|
| 1911 |
+
self.path_state = ExecutionState.WARMUP
|
| 1912 |
+
self.update_generation()
|
| 1913 |
+
return node.run(new_inputs)
|
| 1914 |
+
|
| 1915 |
+
def new_graph_id(self) -> GraphID:
|
| 1916 |
+
return GraphID(next(self.graph_counter))
|
| 1917 |
+
|
| 1918 |
+
def new_func_id(self) -> FunctionID:
|
| 1919 |
+
return FunctionID(next(self.func_counter))
|
| 1920 |
+
|
| 1921 |
+
def add_function(
|
| 1922 |
+
self,
|
| 1923 |
+
model,
|
| 1924 |
+
inputs,
|
| 1925 |
+
static_input_idxs,
|
| 1926 |
+
stack_traces,
|
| 1927 |
+
mode,
|
| 1928 |
+
constants,
|
| 1929 |
+
) -> Tuple[Callable[..., Any], List[Optional[Tensor]]]:
|
| 1930 |
+
id = self.new_func_id()
|
| 1931 |
+
self.ids_to_stack_traces[id] = stack_traces
|
| 1932 |
+
self.ids_to_funcs[id] = WrappedFunction(
|
| 1933 |
+
model,
|
| 1934 |
+
static_input_idxs,
|
| 1935 |
+
id,
|
| 1936 |
+
tuple(t for t in constants if isinstance(t, torch.Tensor) and t.is_cuda),
|
| 1937 |
+
)
|
| 1938 |
+
self.id_to_mode[id] = mode
|
| 1939 |
+
fn = functools.partial(self.run, function_id=id)
|
| 1940 |
+
|
| 1941 |
+
# container needs to set clean up when fn dies
|
| 1942 |
+
get_container(self.device_index).add_strong_reference(fn)
|
| 1943 |
+
return fn, fn(inputs)
|
| 1944 |
+
|
| 1945 |
+
@property
|
| 1946 |
+
def in_recording(self):
|
| 1947 |
+
return self.path_state == ExecutionState.RECORDING
|
| 1948 |
+
|
| 1949 |
+
@property
|
| 1950 |
+
def in_warmup(self):
|
| 1951 |
+
return self.path_state == ExecutionState.WARMUP
|
| 1952 |
+
|
| 1953 |
+
def get_roots(self) -> Iterator[CUDAGraphNode]:
|
| 1954 |
+
for nodes in self.roots.values():
|
| 1955 |
+
yield from nodes
|
| 1956 |
+
|
| 1957 |
+
@property
|
| 1958 |
+
def current_node(self):
|
| 1959 |
+
return self._current_node
|
| 1960 |
+
|
| 1961 |
+
@current_node.setter
|
| 1962 |
+
def current_node(self, value):
|
| 1963 |
+
self._current_node = value
|
| 1964 |
+
if value is None:
|
| 1965 |
+
self.path_state = ExecutionState.NONE
|
| 1966 |
+
|
| 1967 |
+
def update_generation(self):
|
| 1968 |
+
self.current_gen = self.get_curr_generation()
|
| 1969 |
+
|
| 1970 |
+
@staticmethod
|
| 1971 |
+
def get_curr_generation() -> int:
|
| 1972 |
+
if MarkStepBox.mark_step_counter != 0:
|
| 1973 |
+
return MarkStepBox.mark_step_counter
|
| 1974 |
+
|
| 1975 |
+
return GenerationTracker.generation
|
| 1976 |
+
|
| 1977 |
+
@staticmethod
|
| 1978 |
+
def user_invoked_mark_step():
|
| 1979 |
+
return MarkStepBox.mark_step_counter != 0
|
| 1980 |
+
|
| 1981 |
+
def can_start_new_generation(self) -> bool:
|
| 1982 |
+
if not self.in_new_torch_compile_invocation():
|
| 1983 |
+
return False
|
| 1984 |
+
|
| 1985 |
+
if self.user_invoked_mark_step():
|
| 1986 |
+
return True
|
| 1987 |
+
|
| 1988 |
+
return not self.running_forwards_with_pending_backwards
|
| 1989 |
+
|
| 1990 |
+
def in_new_torch_compile_invocation(self):
|
| 1991 |
+
return self.current_gen != self.get_curr_generation()
|
| 1992 |
+
|
| 1993 |
+
def try_end_curr_recording(self, function_id: FunctionID) -> None:
|
| 1994 |
+
"""
|
| 1995 |
+
Check if the current recording can be terminated, either because all outputs of the
|
| 1996 |
+
previously recorded node are dead or because it was executed in a different
|
| 1997 |
+
generation. Will set current_node to None and in_recording to False if successful.
|
| 1998 |
+
"""
|
| 1999 |
+
assert self.in_recording
|
| 2000 |
+
assert self.current_node is not None
|
| 2001 |
+
|
| 2002 |
+
# multiple invocations, allow overwriting the previous generation
|
| 2003 |
+
if self.can_start_new_generation():
|
| 2004 |
+
self.dealloc_current_path_weakrefs()
|
| 2005 |
+
self.clear_current_path_state_and_set_to_none()
|
| 2006 |
+
return
|
| 2007 |
+
|
| 2008 |
+
if self.current_node.all_outputs_are_dead():
|
| 2009 |
+
self.clear_current_path_state_and_set_to_none()
|
| 2010 |
+
return
|
| 2011 |
+
|
| 2012 |
+
self.check_warn_on_unable_to_start_executing(function_id)
|
| 2013 |
+
|
| 2014 |
+
def try_end_curr_execution(self) -> None:
|
| 2015 |
+
"""
|
| 2016 |
+
Check if the current executing node can be terminated, either because all outputs of the
|
| 2017 |
+
previously executed node are dead or because it was executed in a different generation.
|
| 2018 |
+
Will set current_node to None if successful.
|
| 2019 |
+
"""
|
| 2020 |
+
|
| 2021 |
+
assert not self.in_recording
|
| 2022 |
+
if self.current_node is None:
|
| 2023 |
+
return
|
| 2024 |
+
|
| 2025 |
+
if self.can_start_new_generation():
|
| 2026 |
+
self.clear_current_path_state_and_set_to_none()
|
| 2027 |
+
return
|
| 2028 |
+
|
| 2029 |
+
if self.current_node.all_outputs_are_dead():
|
| 2030 |
+
self.clear_current_path_state_and_set_to_none()
|
| 2031 |
+
|
| 2032 |
+
def try_end_curr_warmup(self, function_id: FunctionID):
|
| 2033 |
+
if self.can_start_new_generation():
|
| 2034 |
+
self.dealloc_current_path_weakrefs()
|
| 2035 |
+
self.current_node = None
|
| 2036 |
+
return
|
| 2037 |
+
|
| 2038 |
+
if self.current_node.all_outputs_are_dead():
|
| 2039 |
+
self.current_node = None
|
| 2040 |
+
return
|
| 2041 |
+
|
| 2042 |
+
self.check_warn_on_unable_to_start_executing(function_id)
|
| 2043 |
+
|
| 2044 |
+
def check_warn_on_unable_to_start_executing(self, function_id: FunctionID):
|
| 2045 |
+
"Warn if we in a potential loop where we are unable to hit fast path"
|
| 2046 |
+
if (
|
| 2047 |
+
function_id in self.warned_functions
|
| 2048 |
+
or not self.in_new_torch_compile_invocation()
|
| 2049 |
+
):
|
| 2050 |
+
return
|
| 2051 |
+
|
| 2052 |
+
existing_nodes = [
|
| 2053 |
+
node
|
| 2054 |
+
for node in self.current_node._path_from_root
|
| 2055 |
+
if node.wrapped_function.id == function_id
|
| 2056 |
+
]
|
| 2057 |
+
|
| 2058 |
+
if len(existing_nodes) <= 1:
|
| 2059 |
+
return
|
| 2060 |
+
|
| 2061 |
+
# repeated same pattern
|
| 2062 |
+
parents = {
|
| 2063 |
+
n.parent.wrapped_function.id
|
| 2064 |
+
for n in itertools.chain(existing_nodes, (self.current_node,))
|
| 2065 |
+
if n.parent is not None
|
| 2066 |
+
}
|
| 2067 |
+
if len(parents) == len(existing_nodes):
|
| 2068 |
+
return
|
| 2069 |
+
|
| 2070 |
+
self.warned_functions.add(function_id)
|
| 2071 |
+
warnings.warn(
|
| 2072 |
+
"Unable to hit fast path of CUDAGraphs because of pending, uninvoked backwards. "
|
| 2073 |
+
"Consider running with torch.no_grad() or using torch.compiler.cudagraph_mark_step_begin() "
|
| 2074 |
+
"before each model invocation"
|
| 2075 |
+
)
|
| 2076 |
+
|
| 2077 |
+
def dealloc_current_path_weakrefs(self):
|
| 2078 |
+
# TODO: we could also allow the these weak refs to continue to be allocated,
|
| 2079 |
+
# but that adds some complications.
|
| 2080 |
+
for node in self.current_node._path_from_root:
|
| 2081 |
+
assert len(node.tensor_weakrefs) == len(node.stack_traces)
|
| 2082 |
+
for t, stack_trace in zip(node.tensor_weakrefs, node.stack_traces):
|
| 2083 |
+
ten = None if t is None else t()
|
| 2084 |
+
if ten is None:
|
| 2085 |
+
continue
|
| 2086 |
+
|
| 2087 |
+
stack_trace = (
|
| 2088 |
+
stack_trace.strip()
|
| 2089 |
+
if stack_trace
|
| 2090 |
+
else "[Could not find stack trace]"
|
| 2091 |
+
)
|
| 2092 |
+
msg = (
|
| 2093 |
+
"Error: accessing tensor output of CUDAGraphs that has been overwritten by a subsequent run. "
|
| 2094 |
+
f"Stack trace: {stack_trace}. "
|
| 2095 |
+
"To prevent overwriting, clone the tensor outside of torch.compile() "
|
| 2096 |
+
"or call torch.compiler.cudagraph_mark_step_begin() before each model invocation."
|
| 2097 |
+
)
|
| 2098 |
+
torch._C._set_storage_access_error_msg(ten, msg)
|
| 2099 |
+
|
| 2100 |
+
deleted = set()
|
| 2101 |
+
for storage_ref in self.current_node.path_live_weakrefs():
|
| 2102 |
+
if storage_ref() and storage_ref.data_ptr() not in deleted:
|
| 2103 |
+
deleted.add(storage_ref.data_ptr())
|
| 2104 |
+
torch._C._free_And_Remove_DeleterFn(storage_ref())
|
| 2105 |
+
|
| 2106 |
+
def clear_current_path_state_and_set_to_none(self):
|
| 2107 |
+
self.current_node.clear_path_state()
|
| 2108 |
+
self.current_node = None
|
| 2109 |
+
|
| 2110 |
+
def apply_checkpoint_execution_state_in_allocator(self):
|
| 2111 |
+
"""
|
| 2112 |
+
Checkpoint the current execution state in the caching allocator so that
|
| 2113 |
+
additional cudagraph recordings can be made respecting existent live storages.
|
| 2114 |
+
"""
|
| 2115 |
+
self.debug_checkpointing_counter += 1
|
| 2116 |
+
log.debug(
|
| 2117 |
+
"Checkpointing cuda caching allocator state. Number of checkpoints %d",
|
| 2118 |
+
self.debug_checkpointing_counter,
|
| 2119 |
+
)
|
| 2120 |
+
|
| 2121 |
+
state = self.current_node.checkpointed_caching_state
|
| 2122 |
+
device = self.current_node.device
|
| 2123 |
+
assert state is not None and device is not None
|
| 2124 |
+
|
| 2125 |
+
# currently we deallocate on instead of allowing stale recordings
|
| 2126 |
+
stale_storages: List[int] = []
|
| 2127 |
+
|
| 2128 |
+
# remove cached tensors, otherwise they would prevent memory from being
|
| 2129 |
+
# reclaimed in subsequent recordings
|
| 2130 |
+
self.current_node.remove_path_cached_tensors()
|
| 2131 |
+
live_storages_wrappers = list(self.current_node.path_live_weakrefs())
|
| 2132 |
+
|
| 2133 |
+
live_storages_weak_refs = [t() for t in live_storages_wrappers]
|
| 2134 |
+
ptrs_to_deallocate = self.current_node.data_ptrs_dead_since_invocation()
|
| 2135 |
+
torch._C._cuda_setCheckpointPoolState(
|
| 2136 |
+
device, state, stale_storages, live_storages_weak_refs
|
| 2137 |
+
)
|
| 2138 |
+
|
| 2139 |
+
# NB: deduplicate aliased outputs
|
| 2140 |
+
for ptr in set(ptrs_to_deallocate):
|
| 2141 |
+
torch._C._cuda_cudaCachingAllocator_raw_delete(ptr)
|
| 2142 |
+
|
| 2143 |
+
# Now the live blocks should be exactly equal to the live storages in private pool
|
| 2144 |
+
if config.triton.slow_path_cudagraph_asserts:
|
| 2145 |
+
check_memory_pool(
|
| 2146 |
+
self.device_index, self.cuda_graphs_thread_pool, live_storages_wrappers
|
| 2147 |
+
)
|
| 2148 |
+
for wrapper in live_storages_wrappers:
|
| 2149 |
+
assert wrapper()
|
| 2150 |
+
assert torch._C._has_Standard_Deleter(wrapper())
|
| 2151 |
+
assert wrapper.data_ptr() not in ptrs_to_deallocate
|
| 2152 |
+
|
| 2153 |
+
def live_cudagraph_pool_storages_in_curr_execution(
|
| 2154 |
+
self,
|
| 2155 |
+
) -> List[StorageWeakRefPointer]:
|
| 2156 |
+
if self.current_node is None:
|
| 2157 |
+
return []
|
| 2158 |
+
# explicitly ignoring previous recorded outputs from past path
|
| 2159 |
+
return [t() for t in self.current_node.path_live_weakrefs()]
|
vila/lib/python3.10/site-packages/torch/_inductor/cudagraph_utils.py
ADDED
|
@@ -0,0 +1,105 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import dataclasses
|
| 2 |
+
from typing import Dict, Iterable, Optional
|
| 3 |
+
|
| 4 |
+
import torch
|
| 5 |
+
from torch._inductor.codecache import CompiledFxGraph
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
def get_mutating_use_stack_trace(placeholder_node: torch.fx.Node) -> Optional[str]:
|
| 9 |
+
# reinplaced uses might have a single, non-copy_ use
|
| 10 |
+
if len(placeholder_node.users) == 1:
|
| 11 |
+
return next(iter(placeholder_node.users)).meta.get("stack_trace", None)
|
| 12 |
+
|
| 13 |
+
for use in placeholder_node.users:
|
| 14 |
+
if use.target == torch.ops.aten.copy_.default:
|
| 15 |
+
if stack_trace := use.meta.get("stack_trace", None):
|
| 16 |
+
return stack_trace
|
| 17 |
+
|
| 18 |
+
return None
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def format_default_skip_message(reason: str) -> str:
|
| 22 |
+
return f"skipping cudagraphs due to {reason}"
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def get_mutation_stack_trace(
|
| 26 |
+
gm: torch.fx.GraphModule, mutation_indices: Iterable[int]
|
| 27 |
+
) -> str:
|
| 28 |
+
stack_trace: Optional[str] = ""
|
| 29 |
+
placeholders = [node for node in gm.graph.nodes if node.op == "placeholder"]
|
| 30 |
+
|
| 31 |
+
for idx in mutation_indices:
|
| 32 |
+
placeholder = placeholders[idx]
|
| 33 |
+
if stack_trace := get_mutating_use_stack_trace(placeholder):
|
| 34 |
+
break
|
| 35 |
+
|
| 36 |
+
if stack_trace:
|
| 37 |
+
msg = f"skipping cudagraphs due to mutation on input. Found from : \n {stack_trace}"
|
| 38 |
+
return msg
|
| 39 |
+
|
| 40 |
+
return format_default_skip_message("mutated inputs")
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def check_for_mutation(
|
| 44 |
+
gm: torch.fx.GraphModule, compiled_graph: CompiledFxGraph, num_fixed: int
|
| 45 |
+
) -> Optional[str]:
|
| 46 |
+
default_msg = format_default_skip_message("mutated inputs")
|
| 47 |
+
|
| 48 |
+
# doesnt work for non-trees because the warmup run would apply mutation twice
|
| 49 |
+
if torch._inductor.config.triton.cudagraph_trees:
|
| 50 |
+
# checking if mutation is only on parameters/static inputs
|
| 51 |
+
mutation_indices = [
|
| 52 |
+
idx for idx in compiled_graph.mutated_input_idxs if idx >= num_fixed
|
| 53 |
+
]
|
| 54 |
+
has_mutation = len(mutation_indices) != 0
|
| 55 |
+
if not has_mutation:
|
| 56 |
+
return None
|
| 57 |
+
|
| 58 |
+
return get_mutation_stack_trace(gm, mutation_indices)
|
| 59 |
+
|
| 60 |
+
else:
|
| 61 |
+
has_mutation = len(compiled_graph.mutated_inputs) != 0
|
| 62 |
+
return None if not has_mutation else default_msg
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
def get_use_stack_trace(node) -> Optional[str]:
|
| 66 |
+
for use in node.users:
|
| 67 |
+
if stack_trace := use.meta.get("stack_trace", None):
|
| 68 |
+
return stack_trace
|
| 69 |
+
return None
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
def check_multiple_devices_or_any_cpu_nodes(
|
| 73 |
+
device_node_mapping: Dict[torch.device, torch.fx.Node]
|
| 74 |
+
) -> Optional[str]:
|
| 75 |
+
if cpu_node := device_node_mapping.get(torch.device("cpu")):
|
| 76 |
+
if stack_trace := get_use_stack_trace(cpu_node):
|
| 77 |
+
return format_default_skip_message(
|
| 78 |
+
f"cpu device. Found from : \n {stack_trace}"
|
| 79 |
+
)
|
| 80 |
+
|
| 81 |
+
return format_default_skip_message("cpu device")
|
| 82 |
+
|
| 83 |
+
if (
|
| 84 |
+
len(device_node_mapping) == 1
|
| 85 |
+
and next(iter(device_node_mapping.keys())).type == "cuda"
|
| 86 |
+
):
|
| 87 |
+
return None
|
| 88 |
+
|
| 89 |
+
keys_repr = (repr(key) for key in device_node_mapping.keys())
|
| 90 |
+
return format_default_skip_message(f"multiple devices: {', '.join(keys_repr)}")
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
def check_lowering_disable_cudagraph(
    device_node_mapping: Dict[torch.device, torch.fx.Node]
):
    # Thin alias used at lowering time: the same device constraints that skip
    # cudagraphs at runtime also disable them during lowering.
    return check_multiple_devices_or_any_cpu_nodes(device_node_mapping)
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
@dataclasses.dataclass
class BoxedDeviceIndex:
    """Mutable box holding a device index so the value can be shared and
    updated by reference across compilation stages."""

    value: Optional[int]  # None until a device index has been chosen

    def set(self, device_idx: Optional[int]):
        """Store *device_idx*; must be an int or None."""
        assert isinstance(device_idx, int) or device_idx is None
        self.value = device_idx
|
vila/lib/python3.10/site-packages/torch/_inductor/dependencies.py
ADDED
|
@@ -0,0 +1,506 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import collections
|
| 2 |
+
import dataclasses
|
| 3 |
+
import itertools
|
| 4 |
+
import logging
|
| 5 |
+
import re
|
| 6 |
+
import typing
|
| 7 |
+
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union
|
| 8 |
+
from unittest.mock import patch
|
| 9 |
+
|
| 10 |
+
import sympy
|
| 11 |
+
|
| 12 |
+
import torch
|
| 13 |
+
from torch.fx.experimental.symbolic_shapes import free_unbacked_symbols
|
| 14 |
+
|
| 15 |
+
from .codegen.common import index_prevent_reordering
|
| 16 |
+
from .utils import (
|
| 17 |
+
get_dtype_size,
|
| 18 |
+
reduction_num_outputs,
|
| 19 |
+
sympy_index_symbol,
|
| 20 |
+
sympy_str,
|
| 21 |
+
sympy_subs,
|
| 22 |
+
VarRanges,
|
| 23 |
+
)
|
| 24 |
+
from .virtualized import OpsHandler, ReductionType, V
|
| 25 |
+
|
| 26 |
+
log = logging.getLogger(__name__)
|
| 27 |
+
is_indirect = re.compile(r"indirect|tmp").search
|
| 28 |
+
Dep = Union["MemoryDep", "StarDep", "WeakDep"]
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
class MemoryDep(typing.NamedTuple):
    """A read or write of buffer ``name`` at symbolic ``index``, expressed over
    iteration variables ``var_names`` with matching per-variable extents
    ``size``."""

    name: str
    index: sympy.Expr  # type: ignore[assignment]
    var_names: Tuple[sympy.Symbol, ...]
    size: Tuple[sympy.Expr, ...]

    def __repr__(self):
        return f"MemoryDep({self.name!r}, {self.index}, {self.ranges})"

    @property
    def ranges(self) -> Dict[sympy.Symbol, sympy.Expr]:
        """{c0: 128, c1: 512, ...}"""
        return dict(zip(self.var_names, self.size))

    def get_numel(self) -> sympy.Expr:
        """Number of distinct elements this dep touches (symbolic)."""
        if self.is_indirect():
            # Indirect accesses could hit anywhere; assume the whole buffer.
            numel = V.graph.get_numel(self.name)
        else:
            # Only variables that actually appear in the index contribute.
            vars = set(self.index.free_symbols)
            numel = sympy.Integer(1)
            for var, size in zip(self.var_names, self.size):
                if var in vars:
                    numel = numel * size
        return numel

    def rename(self, renames: Dict[str, str]) -> "MemoryDep":
        # Return a copy pointing at the renamed buffer; self when unaffected.
        if self.name in renames:
            return MemoryDep(
                renames[self.name], self.index, var_names=self.var_names, size=self.size
            )
        return self

    def numbytes_hint(self):
        # Approximate bytes touched: numel size-hint times the element size.
        return V.graph.sizevars.size_hint(self.get_numel()) * get_dtype_size(
            V.graph.get_dtype(self.name)
        )

    def has_unbacked_symbols(self):
        return len(free_unbacked_symbols(self.get_numel())) > 0

    def is_contiguous(self) -> bool:
        # Contiguous iff the index is exactly one of the iteration variables.
        return isinstance(self.index, sympy.Symbol) and self.index in self.var_names

    def is_scalar(self) -> bool:
        # A scalar access does not vary with any iteration variable.
        if isinstance(self.index, sympy.Symbol):
            return self.index not in self.var_names and not self.is_indirect()
        return isinstance(self.index, (int, sympy.Integer))

    def is_indirect(self) -> bool:
        # Indirect means the index contains a symbol produced by a prior load
        # (matched by name via the module-level `is_indirect` regex).
        return any(is_indirect(v.name) for v in self.index.free_symbols)  # type: ignore[attr-defined]
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
class StarDep(typing.NamedTuple):
    """Dependency on the entire buffer ``name``; used when per-element
    indexing cannot be expressed (e.g. opaque kernels, bucketize offsets)."""

    # depends on the entire buffer
    name: str

    @property
    def index(self):
        raise NotImplementedError("StarDep does not have an index")

    def get_numel(self) -> sympy.Expr:
        # The whole buffer is considered touched.
        return V.graph.get_numel(self.name)

    def rename(self, renames: Dict[str, str]) -> "StarDep":
        if self.name in renames:
            return StarDep(renames[self.name])
        return self

    def numbytes_hint(self):
        return V.graph.sizevars.size_hint(self.get_numel()) * get_dtype_size(
            V.graph.get_dtype(self.name)
        )

    def has_unbacked_symbols(self):
        return len(free_unbacked_symbols(self.get_numel())) > 0

    def is_contiguous(self) -> bool:
        return False

    def is_scalar(self) -> bool:
        return False

    def is_indirect(self) -> bool:
        return False
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
# Used for tracking mutation ordering
# if A reads a buffer and B mutates it
# B must be ordered after A
#
# It is weak because if it turns out A's read is never used, we can still
# eliminate it
class WeakDep(typing.NamedTuple):
    # Name of the buffer whose mutation order this edge enforces.
    name: str

    @property
    def index(self):
        raise NotImplementedError("WeakDep does not have an index")

    def get_numel(self) -> sympy.Expr:
        # Ordering-only edge; contributes no real data volume.
        return sympy.Integer(1)

    def rename(self, renames: Dict[str, str]) -> "WeakDep":
        if self.name in renames:
            return WeakDep(renames[self.name])
        return self

    def numbytes_hint(self):
        return 1  # Purely inserted for ordering, not an actual dep

    def has_unbacked_symbols(self):
        return False

    def is_contiguous(self) -> bool:
        return False
|
| 146 |
+
|
| 147 |
+
|
| 148 |
+
class IndexExprDep(typing.NamedTuple):
    """An index expression used by a kernel (via ops.index_expr), canonicalized
    the same way as MemoryDep but not tied to any buffer."""

    index: sympy.Expr  # type: ignore[assignment]
    var_names: Tuple[sympy.Symbol, ...]
    size: Tuple[sympy.Expr, ...]
|
| 152 |
+
|
| 153 |
+
|
| 154 |
+
@dataclasses.dataclass
class ReadWrites:
    """Summary of a kernel body's memory behavior: the buffers it reads and
    writes, standalone index expressions, the iteration variables used, and a
    count of each op invoked."""

    reads: Set[Dep]
    writes: Set[Dep]
    index_exprs: Set[IndexExprDep]
    range_vars: Optional[List[sympy.Expr]] = None
    var_ranges: Optional[VarRanges] = None
    op_counts: typing.Counter[str] = dataclasses.field(
        default_factory=collections.Counter
    )

    def rename(self, renames: typing.Dict[str, str]) -> "ReadWrites":
        # Rebuild with every read/write dep renamed through the buffer map.
        return ReadWrites(
            {dep.rename(renames) for dep in self.reads},
            {dep.rename(renames) for dep in self.writes},
            self.index_exprs,
            self.range_vars,
            self.var_ranges,
            op_counts=self.op_counts,
        )

    def with_read(self, dep: Dep) -> "ReadWrites":
        # Only whole-buffer (Star) or ordering (Weak) deps may be injected.
        assert isinstance(dep, (WeakDep, StarDep))
        return ReadWrites(
            set.union(self.reads, {dep}),
            self.writes,
            self.index_exprs,
            self.range_vars,
            self.var_ranges,
            op_counts=self.op_counts,
        )

    def merge(self, other: "ReadWrites"):
        # Reads satisfied by our combined writes are dropped (reads - writes).
        # NOTE: range_vars/var_ranges are not carried through a merge.
        reads = set.union(self.reads, other.reads)
        writes = set.union(self.writes, other.writes)
        index_exprs = set.union(self.index_exprs, other.index_exprs)
        op_counts = collections.Counter(self.op_counts)
        op_counts.update(other.op_counts)
        return ReadWrites(reads - writes, writes, index_exprs, op_counts=op_counts)

    @staticmethod
    def merge_list(read_writes: List["ReadWrites"]):
        """Merge many ReadWrites at once; same semantics as repeated merge()."""
        all_writes = set.union(*[rw.writes for rw in read_writes])
        all_reads = set.union(*[rw.reads for rw in read_writes]) - all_writes
        all_index_exprs = set.union(*[rw.index_exprs for rw in read_writes])

        op_counts: typing.Counter[Any] = collections.Counter()
        for rw in read_writes:
            op_counts.update(rw.op_counts)

        return ReadWrites(all_reads, all_writes, all_index_exprs, op_counts=op_counts)

    def remove_reads(self, rem_reads):
        # Copy with the given reads removed; everything else preserved.
        return ReadWrites(
            self.reads - rem_reads,
            self.writes,
            self.index_exprs,
            self.range_vars,
            self.var_ranges,
            op_counts=self.op_counts,
        )

    def reads_and_writes(self):
        """Iterate all deps, reads first then writes."""
        return itertools.chain(self.reads, self.writes)
|
| 218 |
+
|
| 219 |
+
|
| 220 |
+
class _RecordLoadStoreInner(V.MockHandler):  # type: ignore[name-defined]
    """Ops handler that records every load/store/index_expr it sees as a
    canonicalized dependency, without generating any real code."""

    def __init__(self, var_ranges: VarRanges, normalize: bool):
        super().__init__()
        self._reads: Set[Dep] = set()
        self._writes: Set[MemoryDep] = set()
        self._index_exprs: Set[IndexExprDep] = set()
        self._var_ranges: VarRanges = var_ranges
        # When True, loops are simplified and variables renumbered so that
        # equivalent accesses from different kernels compare equal.
        self._normalize: bool = normalize

    def canonicalize(
        self, index: sympy.Expr
    ) -> Tuple[sympy.Expr, Tuple[sympy.Symbol, ...], Tuple[sympy.Expr, ...]]:
        """Return (index, var_names, sizes) with size-1 dims dropped and,
        when normalizing, loops simplified and variables renamed to the
        canonical ``c0, c1, ...`` numbering."""
        if not self._normalize:
            sizes = [V.graph.sizevars.simplify(x) for x in self._var_ranges.values()]
            var_names = tuple(
                k for k, v in zip(self._var_ranges.keys(), sizes) if v != 1
            )
            sizes = tuple(v for v in sizes if v != 1)
            return index, var_names, sizes  # type: ignore[return-value]

        # Try to further simplify the indexes even if simplify_loops didn't
        # convert it to the simplest form because of the interference from
        # different indexing formulas.
        free_symbols = index.free_symbols
        var_ranges = {
            k: V.graph.sizevars.simplify(v)
            for k, v in self._var_ranges.items()
            # TODO(jansel): explore this further normalization
            # if k in free_symbols
        }
        index_vars = [*var_ranges.keys()]
        sizes = tuple(var_ranges.values())
        new_sizes, reindex, prune = V.graph.sizevars._simplify_loops(
            index_vars,
            sizes,
            index_prevent_reordering([index], index_vars, sizes),
        )

        # assign new variables each dimension to deal with numbering mismatches
        # d0, d1, d2 could become d0, d2 -- which won't match d0, d1
        new_vars, add_var = var_builder(canonicalization_prefix())
        replacement = dict(zip(index_vars, reindex([add_var(x) for x in new_sizes])))
        index = sympy_subs(sympy.expand(index), replacement)

        new_vars = [*new_vars.keys()]
        new_sizes = [*new_sizes]
        free_symbols = index.free_symbols
        while new_vars and new_vars[-1] not in free_symbols:
            # Reduction has last (reduced) dim in its sizes, but
            # downstream users won't. Normalize this away.
            new_vars.pop()
            new_sizes.pop()
        return index, tuple(new_vars), tuple(new_sizes)  # type: ignore[arg-type]

    def load(self, name: str, index: sympy.Expr) -> str:
        # Record the read and return a symbolic string (MockHandler style).
        self._reads.add(MemoryDep(name, *self.canonicalize(index)))
        return f"load({name}, {sympy_str(index)})"

    def load_seed(self, name: str, index: int):
        # Seed loads always use a constant integer offset.
        assert isinstance(index, int)
        return self.load(name, sympy.Integer(index))

    def store(self, name: str, index: sympy.Expr, value: str, mode=None) -> str:
        self._writes.add(MemoryDep(name, *self.canonicalize(index)))
        return f"store({name}, {sympy_str(index)}, {value}, {mode})"

    def store_reduction(self, name: str, index, value) -> str:
        return self.store(name, index, f"store_reduction({value})")

    def index_expr(self, index: sympy.Expr, dtype) -> str:
        self._index_exprs.add(IndexExprDep(*self.canonicalize(index)))
        return f"index_expr({sympy_str(index)}, {dtype})"

    def bucketize(
        self,
        values,
        offsets_name: str,
        offsets_size: sympy.Expr,
        indexing_dtype: torch.dtype,
        right: bool,
    ):
        # bucketize reads the offsets buffer wholesale, hence a StarDep.
        self._reads.add(StarDep(offsets_name))
        return f"bucketize({values}, {offsets_name}, {sympy_str(offsets_size)}, {indexing_dtype}, {right})"
|
| 303 |
+
|
| 304 |
+
|
| 305 |
+
class _OpCounter:
|
| 306 |
+
"""Shim to count how many times each op is used"""
|
| 307 |
+
|
| 308 |
+
def __init__(self, inner):
|
| 309 |
+
super().__init__()
|
| 310 |
+
self.parent_handler = inner
|
| 311 |
+
self._op_counts: typing.Counter[Any] = collections.Counter()
|
| 312 |
+
|
| 313 |
+
def __getattr__(self, name):
|
| 314 |
+
self._op_counts[name] += 1
|
| 315 |
+
return getattr(self.parent_handler, name)
|
| 316 |
+
|
| 317 |
+
|
| 318 |
+
class RecordLoadStore(V.KernelFormatterHandler):  # type: ignore[name-defined]
    """Formatter handler whose inner chain both counts ops (_OpCounter) and
    records memory dependencies (_RecordLoadStoreInner)."""

    def __init__(self, var_ranges: VarRanges, normalize: bool):
        parent_handler = _RecordLoadStoreInner(
            var_ranges=var_ranges, normalize=normalize
        )
        # Wrap so every op dispatched through this handler is tallied.
        parent_handler = _OpCounter(parent_handler)
        super().__init__(parent_handler=parent_handler)
|
| 325 |
+
|
| 326 |
+
|
| 327 |
+
def var_builder(prefix: str) -> Tuple[VarRanges, Callable[[sympy.Expr], sympy.Symbol]]:
    """Return ``(var_ranges, add_var)`` where each call to ``add_var(length)``
    mints a fresh index symbol named ``{prefix}{i}`` and records its extent in
    ``var_ranges``."""
    counter = itertools.count()
    ranges: VarRanges = dict()

    def add_var(length: sympy.Expr) -> sympy.Symbol:
        symbol = sympy_index_symbol(f"{prefix}{next(counter)}")
        ranges[symbol] = length
        return symbol

    return ranges, add_var
|
| 337 |
+
|
| 338 |
+
|
| 339 |
+
def index_vars_no_squeeze(*argsizes: Tuple[sympy.Expr, ...], prefix: str):
    """Allocate one fresh index variable per dimension of each size tuple,
    keeping size-1 dimensions (no squeezing)."""
    var_ranges, add_var = var_builder(prefix)
    args: List[List[sympy.Symbol]] = [
        [add_var(dim) for dim in size] for size in argsizes
    ]
    return args, var_ranges
|
| 345 |
+
|
| 346 |
+
|
| 347 |
+
def index_vars_squeeze(*argsizes: Tuple[sympy.Expr, ...], prefix: str = "d"):
    """Like index_vars_no_squeeze, but size-1 dimensions are squeezed out
    before allocating variables and restored via SqueezeView's reindexer, so
    each returned arg has the original rank."""
    from .ir import SqueezeView

    var_ranges, add_var = var_builder(prefix)
    args: List[List[sympy.Expr]] = []
    new_sizes: List[List[sympy.Expr]] = []
    for size in argsizes:
        # squeezer returns the squeezed shape plus a mapping back to full rank.
        new_size, reindex = SqueezeView.squeezer(size)
        new_sizes.append(new_size)
        args.append(reindex(list(map(add_var, new_size))))
    return args, var_ranges
|
| 358 |
+
|
| 359 |
+
|
| 360 |
+
def extract_read_writes(
    fn: Callable[..., Any],
    *argsizes: Tuple[sympy.Expr, ...],
    normalize: bool = False,
    prefix: str = "d",
):
    """Symbolically execute ``fn`` on freshly minted index variables and
    collect a ReadWrites describing every load/store/index_expr it performs,
    plus per-op usage counts."""
    args, var_ranges = index_vars_squeeze(*argsizes, prefix=prefix)
    rw = RecordLoadStore(var_ranges, normalize=normalize)
    with V.set_ops_handler(rw):
        fn(*args)

    if normalize:
        range_vars = []  # Number of vars could differ due to normalization
    else:
        range_vars = list(itertools.chain.from_iterable(args))

    # Unwrap RecordLoadStore -> _OpCounter -> _RecordLoadStoreInner.
    inner = rw.parent_handler.parent_handler
    return ReadWrites(
        set(inner._reads),
        set(inner._writes),
        inner._index_exprs,
        range_vars,
        var_ranges,
        rw.parent_handler._op_counts,
    )
|
| 385 |
+
|
| 386 |
+
|
| 387 |
+
def extract_input_node_reduction_ranges(
    input_node: "torch._inductor.ir.TensorBox",
) -> Tuple[Optional[List[sympy.Expr]], Optional[List[sympy.Expr]]]:
    """
    Returns the size and reduction size of all inputs, if the sizes and reduction_sizes (if exist) are all the same.
    It's possible that a node has multiple inputs, some are Reduction nodes and others are Pointwise nodes.
    In this case, reduction_sizes of the Reduction nodes need to be the same.
    Otherwise returns (None, None).
    """

    from .ir import ComputedBuffer, Loops

    if isinstance(input_node.data, ComputedBuffer):
        # Input node has already been realized. Return its size and reduction_size.
        size = input_node.get_size()
        reduction_size = input_node.get_reduction_size()
        if len(reduction_size) > 0:
            return (size, reduction_size)
        else:
            return (None, None)

    if not isinstance(input_node.data.data, Loops):  # type: ignore[attr-defined]
        # Other IRNodes do not have reduction_ranges.
        return (None, None)

    # There is one issue: what if there are views / permutations between the input node and its dependent realized nodes?
    # The current method still uses reduction ranges from the dependent realized node, which is not ideal.
    # Is there a way to check whether there are permutations inbetween?
    reads = input_node.get_reads()
    reduction_size = None
    size = None
    # Breadth-first walk through the read buffers until a reduction buffer
    # fixes the sizes, or until no further progress can be made.
    while reduction_size is None and len(reads) > 0:
        seen = set()
        new_reads = []
        for read in reads:
            if not isinstance(read, MemoryDep):
                continue
            if read.name in seen:
                continue
            seen.add(read.name)
            buffer = V.graph.get_buffer(read.name)
            if buffer is None:
                continue
            if (
                isinstance(buffer, ComputedBuffer)
                and len(buffer.get_reduction_size()) > 0
            ):
                if reduction_size is None:
                    reduction_size = buffer.get_reduction_size()
                    size = buffer.get_size()
                elif (
                    reduction_size != buffer.get_reduction_size()
                    or size != buffer.get_size()
                ):
                    # Conflicting reduction shapes among inputs; give up.
                    return (None, None)
            else:
                # Not a reduction buffer; follow its reads one level deeper.
                new_reads.extend(buffer.get_reads())
        if reads == new_reads:
            # No progress this pass; stop walking.
            return (size, reduction_size)
        else:
            reads = new_reads
    return (size, reduction_size)
|
| 449 |
+
|
| 450 |
+
|
| 451 |
+
def canonicalization_prefix():
    """Variable-name prefix used by canonicalize() (c0, c1, ...)."""
    return "c"
|
| 453 |
+
|
| 454 |
+
|
| 455 |
+
# ops handler which computes all the free unbacked symbols for an IR
class FreeUnbackedSymbolsOpsHandler:
    # Accumulates every unbacked symbol seen in any op argument.
    symbols: Set[sympy.Symbol]

    def __init__(self):
        self.symbols = set()

    def __getattr__(self, name: str) -> Callable[..., Any]:
        # Generic fallback for all ops: scan args/kwargs for sympy expressions
        # and collect their unbacked symbols; the op's value is ignored (None).
        def inner(*args, **kwargs):
            for a in itertools.chain(args, kwargs.values()):
                if isinstance(a, (sympy.Expr, sympy.logic.boolalg.Boolean)):
                    self.symbols |= free_unbacked_symbols(a)

        return inner

    def indirect_indexing(self, index_var, size, check=True) -> sympy.Symbol:
        # index_var is an opaque handle here, not a sympy value; only the
        # size expression can carry unbacked symbols.
        assert not isinstance(index_var, (sympy.Expr, sympy.logic.boolalg.Boolean))
        self.symbols |= free_unbacked_symbols(size)
        return sympy_index_symbol(f"({str(index_var)})")

    def frexp(self, x):
        # frexp yields a (mantissa, exponent) pair; values are unused here.
        return (None,) * 2

    def reduction(
        self,
        dtype: torch.dtype,
        src_dtype: torch.dtype,
        reduction_type: ReductionType,
        value: Union[None, Tuple[None, ...]],
    ) -> Union[None, Tuple[None, ...]]:
        # Mirror the arity of the real reduction (e.g. welford has several
        # outputs) so downstream unpacking still works.
        num_values = reduction_num_outputs(reduction_type)
        return (None,) * num_values if num_values > 1 else None
|
| 487 |
+
|
| 488 |
+
|
| 489 |
+
def _typecheck_FreeUnbackedSymbolsOpsHandler(
    h: FreeUnbackedSymbolsOpsHandler,
) -> OpsHandler[None]:
    # Static-typing aid only: asserts the handler structurally satisfies the
    # OpsHandler protocol. Never called at runtime for its value.
    return h
|
| 493 |
+
|
| 494 |
+
|
| 495 |
+
def extract_free_unbacked_symbols(fn: Callable[..., Any], index, rindex=None):
    """Run ``fn(index[, rindex])`` under a symbol-collecting ops handler and
    return the set of free unbacked symbols the body references."""
    from .ir import FlexibleLayout

    args = [index, rindex] if rindex is not None else [index]
    handler = FreeUnbackedSymbolsOpsHandler()
    # NB: I cargo culted the allow_indexing patch here, I don't understand why
    # people do this all over
    with V.set_ops_handler(handler), patch.object(
        FlexibleLayout, "allow_indexing", True
    ):
        fn(*args)
    return handler.symbols
|
vila/lib/python3.10/site-packages/torch/_inductor/exc.py
ADDED
|
@@ -0,0 +1,98 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import os
|
| 4 |
+
import tempfile
|
| 5 |
+
import textwrap
|
| 6 |
+
from functools import lru_cache
|
| 7 |
+
|
| 8 |
+
# When TORCHINDUCTOR_WRITE_MISSING_OPS=1, append each missing op target (at
# most once per process, enforced by lru_cache) to $TMPDIR/missing_ops.txt
# so missing lowerings can be triaged offline. Otherwise recording is a no-op.
if os.environ.get("TORCHINDUCTOR_WRITE_MISSING_OPS") == "1":

    @lru_cache(None)
    def _record_missing_op(target):
        with open(f"{tempfile.gettempdir()}/missing_ops.txt", "a") as fd:
            fd.write(str(target) + "\n")

else:

    def _record_missing_op(target):  # type: ignore[misc]
        # Recording disabled; intentionally a no-op.
        pass
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class OperatorIssue(RuntimeError):
    """Base class for errors raised while lowering an FX node to inductor IR."""

    @staticmethod
    def operator_str(target, args, kwargs):
        """Render target/args/kwargs as an indented, line-per-item summary."""
        lines = [f"target: {target}"]
        for i, arg in enumerate(args):
            lines.append(f"args[{i}]: {arg}")
        if kwargs:
            lines.append(f"kwargs: {kwargs}")
        return textwrap.indent("\n".join(lines), "  ")
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
class MissingOperatorWithoutDecomp(OperatorIssue):
    """Raised when an op has neither an inductor lowering nor a known
    decomposition."""

    def __init__(self, target, args, kwargs):
        _record_missing_op(target)
        message = f"missing lowering\n{self.operator_str(target, args, kwargs)}"
        super().__init__(message)
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
class MissingOperatorWithDecomp(OperatorIssue):
    """Raised when an op has no inductor lowering, but a decomposition exists
    that simply hasn't been enabled for inductor yet."""

    def __init__(self, target, args, kwargs):
        _record_missing_op(target)
        super().__init__(
            f"missing decomposition\n{self.operator_str(target, args, kwargs)}"
            + textwrap.dedent(
                f"""

                There is a decomposition available for {target} in
                torch._decomp.get_decompositions(). Please add this operator to the
                `decompositions` list in torch._inductor.decompositions
                """
            )
        )
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
class LoweringException(OperatorIssue):
    """Wraps an exception raised while lowering one FX node, attaching the
    node's target/args/kwargs for context."""

    def __init__(self, exc: Exception, target, args, kwargs):
        detail = self.operator_str(target, args, kwargs)
        super().__init__(f"{type(exc).__name__}: {exc}\n{detail}")
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
class InvalidCxxCompiler(RuntimeError):
    """Raised when config.cpp.cxx does not name a usable C++ compiler."""

    def __init__(self):
        # Local import avoids a circular import at module load time.
        from . import config

        super().__init__(
            f"No working C++ compiler found in {config.__name__}.cpp.cxx: {config.cpp.cxx}"
        )
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
class CppWrapperCodeGenError(RuntimeError):
    """Raised when generating the C++ wrapper for a compiled graph fails."""

    def __init__(self, msg: str):
        full_message = f"C++ wrapper codegen error: {msg}"
        super().__init__(full_message)
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
class CppCompileError(RuntimeError):
    """Raised when invoking the C++ compiler fails; the message carries the
    command line and the compiler's output."""

    def __init__(self, cmd: list[str], output: str):
        # Subprocess output may arrive as bytes; normalize to text first.
        if isinstance(output, bytes):
            output = output.decode("utf-8")

        template = textwrap.dedent(
            """
            C++ compile error

            Command:
            {cmd}

            Output:
            {output}
            """
        )
        super().__init__(template.strip().format(cmd=" ".join(cmd), output=output))
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
class CUDACompileError(CppCompileError):
    # Same payload/formatting as CppCompileError, raised for CUDA (nvcc)
    # compilations so callers can distinguish the toolchain that failed.
    pass
|
vila/lib/python3.10/site-packages/torch/_inductor/freezing.py
ADDED
|
@@ -0,0 +1,266 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import itertools
|
| 4 |
+
import logging
|
| 5 |
+
|
| 6 |
+
import weakref
|
| 7 |
+
from typing import Any, List, Optional, Tuple
|
| 8 |
+
|
| 9 |
+
import torch
|
| 10 |
+
import torch.utils._pytree as pytree
|
| 11 |
+
from torch._dynamo.utils import dynamo_timed, lazy_format_graph_code
|
| 12 |
+
from torch._functorch.aot_autograd import MutationType
|
| 13 |
+
from torch._functorch.compile_utils import fx_graph_cse
|
| 14 |
+
from torch._inductor.constant_folding import constant_fold, replace_node_with_constant
|
| 15 |
+
|
| 16 |
+
from torch._inductor.fx_passes.freezing_patterns import freezing_passes
|
| 17 |
+
from torch._inductor.fx_passes.post_grad import view_to_reshape
|
| 18 |
+
|
| 19 |
+
from . import config
|
| 20 |
+
|
| 21 |
+
aten = torch.ops.aten
|
| 22 |
+
prims = torch.ops.prims
|
| 23 |
+
|
| 24 |
+
log = logging.getLogger(__name__)
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def replace_params_with_constants(
    gm: torch.fx.GraphModule,
    flat_params: list[Any],
    fw_metadata: torch._functorch.aot_autograd.ViewAndMutationMeta,
) -> List[int]:
    """
    Replaces the parameters of a PyTorch GraphModule with constants wherever possible.
    Returns a list of indices representing the input parameters that were not converted to constants.
    """
    params = [node for node in gm.graph.nodes if node.op == "placeholder"]
    # NOTE(review): this slice is a no-op copy; presumably it was meant to be
    # params[: len(flat_params)] — the zip below truncates anyway. Confirm.
    fake_inp_nodes = params[: len(params)]
    preserved_arg_indices = []
    # Inputs that alias an output must stay live as real graph inputs.
    aliased_input_args = [
        out_info.base_idx
        for out_info in fw_metadata.output_info
        if out_info.base_idx is not None
    ]

    # TODO (tmanlaibaatar) figure out why this is different
    # from mutated_inp_runtime_indices
    mutated_inps = [
        i
        for i, m in enumerate(fw_metadata.input_info)
        if m.mutation_type
        in (MutationType.MUTATED_IN_GRAPH, MutationType.MUTATED_OUT_GRAPH)
    ]

    for i, (real_input, node) in enumerate(zip(flat_params, fake_inp_nodes)):
        # Mutated or aliased params cannot be baked in as constants.
        if i in mutated_inps or i in aliased_input_args:
            preserved_arg_indices.append(i)
            continue
        replace_node_with_constant(gm, node, real_input)
    # add on non param inputs
    preserved_arg_indices.extend(range(len(flat_params), len(params)))
    # is this necessary ?
    gm.recompile()
    return preserved_arg_indices
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
def freeze(
    dynamo_gm: torch.fx.GraphModule,
    aot_autograd_gm: torch.fx.GraphModule,
    example_inputs: List[torch._subclasses.FakeTensor],
) -> Tuple[torch.fx.GraphModule, List[int]]:
    """
    Inlines parameters that are not mutated into constants and optimizes the graph through constant propagation
    and other techniques. If enabled, the function also discards the original parameters of the module for memory efficiency.

    Assumes that this function is run in dynamo tracing post aot_autograd.

    Args:
        dynamo_gm (torch.fx.GraphModule): The Dynamo constructed GraphModule.
        aot_autograd_gm (torch.fx.GraphModule): The aot_autograd constructed GraphModule to be frozen.
        example_inputs (List[torch.Tensor]): A list of example input tensors to be used in the freezing process.

    Returns:
        Tuple[torch.fx.GraphModule, List[int]]: A tuple containing the frozen GraphModule and a list of indices
        of the inputs that were preserved (not turned into constants).
    """
    # We have convert conv's weight to channels last which may meet error for .view
    # when doing fake_tensor_prop. So we need to convert view to reshape first.
    # See the details in fx_codegen_and_compile of compile_fx.py.
    view_to_reshape(aot_autograd_gm)

    # With a tracing context we know which inputs are params and how they are
    # mutated/aliased, so some can be turned into constants. Without one,
    # every input must be preserved.
    if tracing_context := torch._guards.TracingContext.try_get():
        fw_metadata = tracing_context.fw_metadata
        params_flat = tracing_context.params_flat
        assert fw_metadata is not None and params_flat is not None

        preserved_arg_indices = replace_params_with_constants(
            aot_autograd_gm, params_flat, fw_metadata
        )
    else:
        inputs = [
            node for node in aot_autograd_gm.graph.nodes if node.op == "placeholder"
        ]
        preserved_arg_indices = list(range(len(inputs)))

    # TODO - further restrict cse ? right now needed to dedup aliasing ops
    cse_graph = fx_graph_cse(aot_autograd_gm.graph)
    aot_autograd_gm.graph = cse_graph
    aot_autograd_gm.recompile()

    # Only the preserved inputs remain as runtime arguments of the frozen graph.
    aot_example_inputs = [example_inputs[ind] for ind in preserved_arg_indices]
    freezing_passes(aot_autograd_gm, aot_example_inputs)

    constant_fold(aot_autograd_gm)
    # invalidate nn Modules
    if config.freezing_discard_parameters:
        invalidate_eager_modules()
        discard_traced_gm_params(dynamo_gm)

    log.debug("%s", lazy_format_graph_code("FROZEN GRAPH", aot_autograd_gm))

    return aot_autograd_gm, preserved_arg_indices
|
| 122 |
+
|
| 123 |
+
|
| 124 |
+
class ErasedTensor(torch.Tensor):
    """Meta-device stand-in for a parameter discarded by freezing.

    Any attempt to run an op on it raises, pointing the user back at the
    module/attribute whose storage was freed.
    """

    @staticmethod
    def __new__(cls, elem, name, owning_mod):
        # Storage is dropped: the subclass wraps a meta-device copy only.
        return super().__new__(cls, elem.to(device="meta"))

    def __init__(self, elem, name: Optional[str], mod):
        # Remember where we came from, for the error message below.
        self.erased_name = name
        # weakref so the erased tensor does not keep the module alive.
        self.owning_mod_ref = weakref.ref(mod)

    @classmethod
    def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
        # Find any ErasedTensor among the op's tensor arguments so the error
        # can name the erased parameter involved.
        erased_tensors = [
            e
            for e in pytree.arg_tree_leaves(*args, **kwargs)
            if isinstance(e, ErasedTensor)
        ]
        assert len(erased_tensors) > 0
        e = erased_tensors[0]

        raise RuntimeError(
            f"Trying to run Pytorch Eager Module after Dynamo Freezing. "
            "The original parameters have been discarded for memory efficiency. "
            f"Found in op {func} for erased parameter {e.erased_name} of {e.owning_mod_ref()}"
        )
|
| 148 |
+
|
| 149 |
+
|
| 150 |
+
@torch.utils._python_dispatch._disable_current_modes()
def invalidate_eager_modules():
    """Replace params/buffers of all traced nn.Modules with ErasedTensor.

    Walks the modules recorded in the active TracingContext and swaps each
    parameter and buffer for a meta-device ErasedTensor, freeing the original
    storage. Subsequent eager use of those modules raises a RuntimeError.
    """
    for mod in torch._guards.TracingContext.get().module_context.nn_modules.values():
        if not isinstance(mod, torch.nn.Module):
            continue

        # list(...) snapshot: we mutate the module's attributes while iterating.
        for attr_name, tensor in list(
            itertools.chain(
                mod.named_parameters(recurse=False), mod.named_buffers(recurse=False)
            )
        ):
            with torch._dispatch.python.no_python_dispatcher():
                e_t = ErasedTensor(tensor, attr_name, mod)
            if isinstance(tensor, torch.nn.Parameter):
                # Preserve param-like behavior on the stand-in.
                e_t.requires_grad_(True)
                e_t._is_param = True  # type: ignore[attr-defined]
            setattr(mod, attr_name, e_t)
|
| 167 |
+
|
| 168 |
+
|
| 169 |
+
@torch.utils._python_dispatch._disable_current_modes()
def discard_traced_gm_params(mod: torch.fx.GraphModule):
    """Replace *mod*'s own (non-recursive) params/buffers with ErasedTensor.

    Same mechanism as invalidate_eager_modules, applied to the Dynamo-traced
    GraphModule itself so its copies of the parameters are also freed.
    """
    # list(...) snapshot: we mutate the module's attributes while iterating.
    for attr_name, tensor in list(
        itertools.chain(
            mod.named_parameters(recurse=False), mod.named_buffers(recurse=False)
        )
    ):
        with torch._dispatch.python.no_python_dispatcher():
            e_t = ErasedTensor(tensor, attr_name, mod)
        if isinstance(tensor, torch.nn.Parameter):
            # Preserve param-like behavior on the stand-in.
            e_t.requires_grad_(True)
            e_t._is_param = True  # type: ignore[attr-defined]
        setattr(mod, attr_name, e_t)
|
| 182 |
+
|
| 183 |
+
|
| 184 |
+
def enforce_output_layout(gm: torch.fx.GraphModule):
    """
    Make sure the output node's layout does not change due to compiler optimizations
    by adding aten.as_strided nodes with the expected strides.

    Only used for inference so we can assume all graph outputs are model outputs.
    """
    # The output node is always the last node of an fx graph.
    *_, output_node = gm.graph.nodes
    out_list = output_node.args[0]
    with gm.graph.inserting_before(output_node):
        for n in out_list:
            # Only pin strides for dense, non-overlapping tensor outputs.
            if not isinstance(
                n.meta["val"], torch.Tensor
            ) or not torch._prims_common.is_non_overlapping_and_dense(n.meta["val"]):
                continue

            # add a node to enforce eager layout
            ft = n.meta["val"]
            new_node = gm.graph.call_function(
                prims.inductor_force_stride_order.default, (n, ft.stride())
            )

            # can not call
            # n.replace_all_uses_with(new_node)
            # since it will replace the usage of n in new_node itself.
            output_node.replace_input_with(n, new_node)

    gm.graph.lint()
    gm.recompile()
|
| 213 |
+
|
| 214 |
+
|
| 215 |
+
def enforce_as_strided_input_layout(gm: torch.fx.GraphModule):
    """
    Make sure the as_strided node's input's layout does not change due to compiler
    optimizations, because the as_strided strides info depends on input tensor stride info.
    """

    as_strided_ops = [
        torch.ops.aten.as_strided.default,
        torch.ops.aten.as_strided_.default,
        torch.ops.aten.as_strided_scatter.default,
    ]
    # Snapshot first: we insert new nodes while walking these.
    strided_nodes = [n for n in gm.graph.nodes if n.target in as_strided_ops]
    for n in strided_nodes:
        with gm.graph.inserting_before(n):
            # add a node to enforce eager layout
            ft = n.args[0].meta["val"]
            new_node = gm.graph.call_function(
                prims.inductor_force_stride_order.default, (n.args[0], ft.stride())
            )
            # Only this as_strided consumer is rerouted through the pinned node.
            n.replace_input_with(n.args[0], new_node)

    gm.graph.lint()
    gm.recompile()
|
| 238 |
+
|
| 239 |
+
|
| 240 |
+
@dynamo_timed
def convert_conv_weights_to_channels_last(gm: torch.fx.GraphModule):
    """
    Convert 4d convolution weight tensor to channels last format.

    This pass is performed before freezing so the added nodes can be constant
    folded by freezing.
    """
    convs = [n for n in gm.graph.nodes if n.target == aten.convolution.default]
    for conv in convs:
        # convolution args: (input, weight, bias, ...)
        weight_node = conv.args[1]
        if len(weight_node.meta["val"].size()) != 4 or weight_node.meta[
            "val"
        ].is_contiguous(memory_format=torch.channels_last):
            # not a 4d tensor or already channels last, skip
            continue

        with gm.graph.inserting_before(conv):
            # Materialize a channels-last copy of the weight; freezing will
            # constant-fold this clone away.
            new_node = gm.graph.call_function(
                aten.clone.default,
                (weight_node,),
                {"memory_format": torch.channels_last},
            )
            conv.replace_input_with(weight_node, new_node)

    # Layout changed above, so re-pin strides where they are observable.
    enforce_as_strided_input_layout(gm)
    enforce_output_layout(gm)
|
vila/lib/python3.10/site-packages/torch/_inductor/fx_passes/freezing_patterns.py
ADDED
|
@@ -0,0 +1,212 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import functools
|
| 2 |
+
|
| 3 |
+
import torch
|
| 4 |
+
from torch._inductor.compile_fx import fake_tensor_prop
|
| 5 |
+
from ..._dynamo.utils import counters
|
| 6 |
+
|
| 7 |
+
from .. import config
|
| 8 |
+
from ..pattern_matcher import (
|
| 9 |
+
_return_true,
|
| 10 |
+
CallFunction,
|
| 11 |
+
fwd_only,
|
| 12 |
+
Ignored,
|
| 13 |
+
init_once_fakemode,
|
| 14 |
+
KeywordArg,
|
| 15 |
+
Match,
|
| 16 |
+
PatternMatcherPass,
|
| 17 |
+
register_graph_pattern,
|
| 18 |
+
register_replacement,
|
| 19 |
+
stable_topological_sort,
|
| 20 |
+
)
|
| 21 |
+
|
| 22 |
+
# Shorthand for the aten op namespace used by patterns below.
aten = torch.ops.aten

# First pass_patterns[0] are applied, then [1], then [2]
pass_patterns = [
    PatternMatcherPass(),
    PatternMatcherPass(),
    PatternMatcherPass(),
]

# Separate pass for conv+binary folding; applied repeatedly in freezing_passes.
binary_folding_pass = PatternMatcherPass()
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def freezing_passes(gm: torch.fx.GraphModule, aot_example_inputs):
    """
    Passes that are applied to the graph to freeze pass.

    Runs binary folding to a fixed point (bounded at 4 rounds), constant
    folding, the three registered pattern passes, and CPU weight-prepack
    cleanup, recompiling *gm* in place.
    """

    from ..freezing import constant_fold

    lazy_init()
    # We need a few rounds of binary folding to get rid of all the
    # unnecessary nodes, but may need a good method to chose the rounds number.
    # works like: conv+binary+binary.
    binary_folding = counters["inductor"]["binary_folding"]
    fake_tensor_prop(gm, aot_example_inputs, True)

    torch._inductor.fx_passes.binary_folding.mark_mixed_dtype_allowed_convs(gm)
    for _ in range(4):
        constant_fold(gm)
        # Make sure meta['val'] is properly set for all nodes
        fake_tensor_prop(gm, aot_example_inputs, True)
        binary_folding_pass.apply(gm.graph)  # type: ignore[arg-type]
        # If we don't have binary folding, we don't need to run the pass again.
        # TODO: remove the need to run fake_tensor_prop on the whole model.
        if counters["inductor"]["binary_folding"] == binary_folding:
            break
        binary_folding = counters["inductor"]["binary_folding"]

    torch._inductor.fx_passes.binary_folding.recover_original_precision_folded_convs(gm)

    constant_fold(gm)
    fake_tensor_prop(gm, aot_example_inputs, True)

    # Apply the staged pattern passes in order: [0], then [1], then [2].
    for pattern in pass_patterns:
        pattern.apply(gm.graph)  # type: ignore[arg-type]

    # The CPU weight packing always assume the conv's weight is channels last,
    # So make sure the layout_optimization is on when doing it.
    if (
        torch._C._has_mkldnn
        and config.cpp.weight_prepack
        and config.layout_optimization
    ):
        from .mkldnn_fusion import _eliminate_duplicate_packed_nodes

        _eliminate_duplicate_packed_nodes(gm)

    stable_topological_sort(gm.graph)
    gm.recompile()
    gm.graph.lint()
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
@init_once_fakemode
def lazy_init():
    """One-time (fake-mode) registration of all freezing pattern groups."""
    # mkldnn weight prepack patterns only apply on CPU builds with mkldnn.
    if torch._C._has_mkldnn and config.cpp.weight_prepack:
        from .mkldnn_fusion import _mkldnn_weight_pack_init

        _mkldnn_weight_pack_init()

    # Local import to avoid a circular dependency at module load time.
    from .binary_folding import binary_folding_init

    addmm_patterns_init()
    binary_folding_init()
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
def register_freezing_graph_pattern(pattern, extra_check=_return_true, pass_number=0):
    """Register *pattern* into the freezing pattern pass at *pass_number* (0-2)."""
    target_pass = pass_patterns[pass_number]
    return register_graph_pattern(pattern, extra_check=extra_check, pass_dict=target_pass)
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
def register_binary_folding_pattern(pattern, extra_check=_return_true):
    """Register *pattern* into the dedicated binary-folding pass."""
    return register_graph_pattern(
        pattern, extra_check=extra_check, pass_dict=binary_folding_pass
    )
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
@functools.lru_cache(None)
def addmm_patterns_init():
    """Register mm/addmm horizontal-fusion patterns (run once via lru_cache).

    Fuses several matmuls/addmms sharing one input into a single wider
    matmul over concatenated weights, then chunks the result back apart.
    """
    if torch.cuda.is_available():
        # workaround https://github.com/pytorch/pytorch/issues/97894
        device = "cuda"
    else:
        device = "cpu"
    # Example-tensor factory used to trace the patterns below.
    val = functools.partial(torch.empty, (10, 10), device=device, requires_grad=False)

    def check_concat_weights(match):
        # Only fuse when every weight is a frozen constant (get_attr) and
        # all weights share one shape, so torch.cat along dim=1 is valid.
        weights = [
            match.kwargs["w1"],
            match.kwargs["w2"],
        ]
        if "w3" in match.kwargs:
            weights.append(match.kwargs["w3"])

        return all(
            w.op == "get_attr" and w.meta["val"].shape == weights[0].meta["val"].shape
            for w in weights
        )

    def matmul_fuse_pattern(inp, w1, w2, w3):
        # Three matmuls with a shared left operand.
        return (inp @ w1, inp @ w2, inp @ w3)

    def matmul_replacement(inp, w1, w2, w3):
        # One wide matmul, split back into three outputs.
        cat_t = torch.cat((w1, w2, w3), dim=1)
        mm = inp @ cat_t
        return mm.chunk(3, dim=1)

    register_replacement(
        matmul_fuse_pattern,
        matmul_replacement,
        [val(), val(), val(), val()],
        fwd_only,
        pass_patterns[0],
        extra_check=check_concat_weights,
        exclusive_arg_names=("w1", "w2", "w3"),
    )

    def matmul_fuse_pattern_two(inp, w1, w2):
        # Two-matmul variant of the pattern above.
        return (inp @ w1, inp @ w2)

    def matmul_replacement_two(inp, w1, w2):
        cat_t = torch.cat((w1, w2), dim=1)
        mm = inp @ cat_t
        return mm.chunk(2, dim=1)

    register_replacement(
        matmul_fuse_pattern_two,
        matmul_replacement_two,
        [val(), val(), val()],
        fwd_only,
        pass_patterns[0],
        extra_check=check_concat_weights,
        exclusive_arg_names=("w1", "w2"),
    )

    def addmm_fuse_pattern_second(inp, w1, w2, w3, b1, b2, b3):
        # Three addmms with shared input; biases concatenated in replacement.
        return (
            aten.addmm(b1, inp, w1),
            aten.addmm(b2, inp, w2),
            aten.addmm(b3, inp, w3),
        )

    def addmm_fuse_replacement_second(inp, w1, w2, w3, b1, b2, b3):
        cat_w = torch.cat((w1, w2, w3), dim=1)
        cat_b = torch.cat((b1, b2, b3))
        return aten.addmm(cat_b, inp, cat_w).chunk(3, dim=1)

    register_replacement(
        addmm_fuse_pattern_second,
        addmm_fuse_replacement_second,
        [val() for _ in range(7)],
        fwd_only,
        pass_patterns[0],
        extra_check=check_concat_weights,
        exclusive_arg_names=("w1", "w2", "w3", "b1", "b2", "b3"),
    )
|
| 192 |
+
|
| 193 |
+
|
| 194 |
+
def same_dtype(match):
    """True when the matched convert_element_type targets its input's own dtype."""
    source_node = match.output_node().args[0]
    return source_node.meta["val"].dtype == match.kwargs["dtype"]
|
| 196 |
+
|
| 197 |
+
|
| 198 |
+
@register_graph_pattern(
    CallFunction(
        torch.ops.prims.convert_element_type.default,
        Ignored(),
        KeywordArg("dtype"),
    ),
    pass_dict=pass_patterns[0],
    extra_check=same_dtype,
)
def unnecessary_dtype_convert(match: Match, **kwargs):
    """Remove unnecessary dtype conversion op, probably left as a result of Conv-Bn folding"""
    # same_dtype guarantees input dtype == target dtype, so the convert is a no-op.
    graph = match.graph
    node = match.output_node()
    # Reroute consumers to the convert's input, then drop the dead node.
    node.replace_all_uses_with(node.args[0])
    graph.erase_node(node)
|
vila/lib/python3.10/site-packages/torch/_inductor/fx_passes/fuse_attention.py
ADDED
|
@@ -0,0 +1,786 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import functools
|
| 2 |
+
import inspect
|
| 3 |
+
import logging
|
| 4 |
+
import math
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
from ..._dynamo.utils import counters
|
| 8 |
+
from ..pattern_matcher import (
|
| 9 |
+
filter_nodes,
|
| 10 |
+
fwd_only,
|
| 11 |
+
joint_fwd_bwd,
|
| 12 |
+
register_replacement,
|
| 13 |
+
)
|
| 14 |
+
|
| 15 |
+
log = logging.getLogger(__name__)
|
| 16 |
+
aten = torch.ops.aten
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def _sfdp_pattern_1(query, key, value, inv_scale):
    """softmax(Q @ K^T / inv_scale) @ V — traced for SDPA fusion."""
    scores = torch.matmul(query, key.transpose(-2, -1)).div(inv_scale)
    return scores.softmax(dim=-1).matmul(value)
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def _sfdp_replacement_1(query, key, value, inv_scale):
    """Fused SDPA equivalent of _sfdp_pattern_1 (scale = 1/inv_scale)."""
    counters["inductor"]["fuse_attention"] += 1
    q, k, v = query.contiguous(), key.contiguous(), value.contiguous()
    return aten.scaled_dot_product_attention(
        q,
        k,
        v,
        attn_mask=None,
        dropout_p=0.0,
        is_causal=False,
        scale=1.0 / inv_scale,
    )
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
def _sfdp_pattern_2(query, key, value, scale_factor):
    """softmax(Q @ K^T * scale_factor) @ V — traced for SDPA fusion."""
    scores = torch.matmul(query, key.transpose(-2, -1)).mul(scale_factor)
    return scores.softmax(dim=-1).matmul(value)
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
def _sfdp_replacement_2(query, key, value, scale_factor):
    """Fused SDPA equivalent of _sfdp_pattern_2 (multiplicative scale)."""
    counters["inductor"]["fuse_attention"] += 1
    q, k, v = query.contiguous(), key.contiguous(), value.contiguous()
    return aten.scaled_dot_product_attention(
        q,
        k,
        v,
        attn_mask=None,
        dropout_p=0.0,
        is_causal=False,
        scale=scale_factor,
    )
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
def _sfdp_pattern_3(query, key, value, inv_scale_factor, dropout_p):
    """dropout(softmax(Q @ K^T / inv_scale_factor)) @ V — traced for SDPA fusion."""
    probs = torch.matmul(query, key.transpose(-2, -1)).div(inv_scale_factor)
    probs = probs.softmax(dim=-1)
    return torch.nn.functional.dropout(probs, p=dropout_p).matmul(value)
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
def _sfdp_replacement_3(query, key, value, inv_scale_factor, dropout_p):
    """Fused SDPA equivalent of _sfdp_pattern_3 (with dropout)."""
    counters["inductor"]["fuse_attention"] += 1
    q, k, v = query.contiguous(), key.contiguous(), value.contiguous()
    return aten.scaled_dot_product_attention(
        q,
        k,
        v,
        attn_mask=None,
        dropout_p=dropout_p,
        is_causal=False,
        scale=1.0 / inv_scale_factor,
    )
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
def _sfdp_pattern_4(query, key, value, scale_factor, dropout_p):
    """dropout(softmax(Q @ K^T * scale_factor)) @ V — traced for SDPA fusion."""
    probs = torch.matmul(query, key.transpose(-2, -1)).mul(scale_factor).softmax(dim=-1)
    return torch.nn.functional.dropout(probs, p=dropout_p).matmul(value)
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
def _sfdp_replacement_4(query, key, value, scale_factor, dropout_p):
    """Fused SDPA equivalent of _sfdp_pattern_4 (scale + dropout)."""
    counters["inductor"]["fuse_attention"] += 1
    q, k, v = query.contiguous(), key.contiguous(), value.contiguous()
    return aten.scaled_dot_product_attention(
        q,
        k,
        v,
        attn_mask=None,
        dropout_p=dropout_p,
        is_causal=False,
        scale=scale_factor,
    )
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
def _sfdp_pattern_5(query, key, value, attn_mask):
    """softmax(Q @ K^T / sqrt(d) + mask) @ V, no dropout — traced for SDPA fusion."""
    scores = query @ key.transpose(-2, -1) / math.sqrt(query.size(-1))
    attn_weight = torch.softmax(scores + attn_mask, dim=-1)
    # attn_weight = torch.dropout(attn_weight, dropout_p)
    return attn_weight @ value
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
def _sfdp_replacement_5(query, key, value, attn_mask):
    """Fused SDPA equivalent of _sfdp_pattern_5 (additive mask, default scale)."""
    counters["inductor"]["fuse_attention"] += 1
    q, k, v = query.contiguous(), key.contiguous(), value.contiguous()
    return aten.scaled_dot_product_attention(
        q,
        k,
        v,
        attn_mask=attn_mask.to(dtype=query.dtype),
        dropout_p=0.0,
        is_causal=False,
    )
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
def _sfdp_pattern_6(query, key, value, attn_mask, dropout_p):
    """dropout(softmax(Q @ K^T / sqrt(d) + mask)) @ V — traced for SDPA fusion."""
    scores = query @ key.transpose(-2, -1) / math.sqrt(query.size(-1))
    attn_weight = torch.softmax(scores + attn_mask, dim=-1)
    attn_weight = torch.dropout(attn_weight, dropout_p, True)
    return attn_weight @ value
|
| 131 |
+
|
| 132 |
+
|
| 133 |
+
def _sfdp_replacement_6(query, key, value, attn_mask, dropout_p):
    """Fused SDPA equivalent of _sfdp_pattern_6 (mask + dropout)."""
    counters["inductor"]["fuse_attention"] += 1
    q, k, v = query.contiguous(), key.contiguous(), value.contiguous()
    return aten.scaled_dot_product_attention(
        q,
        k,
        v,
        attn_mask=attn_mask.to(dtype=query.dtype),
        dropout_p=dropout_p,
        is_causal=False,
    )
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
def _sfdp_pattern_7(query, key, value, dropout_p):
|
| 146 |
+
# in real workloads inputs to matmul are permuted
|
| 147 |
+
# causing matmul to expand to a series of expand and clone calls
|
| 148 |
+
# we want the same to happen during pattern tracing
|
| 149 |
+
q = query.permute(0, 2, 1, 3)
|
| 150 |
+
k = key.permute(0, 2, 1, 3)
|
| 151 |
+
v = value.permute(0, 2, 1, 3)
|
| 152 |
+
div = q @ k.transpose(-2, -1) / math.sqrt(q.size(-1))
|
| 153 |
+
div = div.to(torch.float32)
|
| 154 |
+
attn_weight = torch.softmax(div, dim=-1)
|
| 155 |
+
attn_weight = torch.dropout(attn_weight, dropout_p, True)
|
| 156 |
+
attn_weight = attn_weight.to(torch.float16)
|
| 157 |
+
return attn_weight @ v
|
| 158 |
+
|
| 159 |
+
|
| 160 |
+
def _sfdp_replacement_7(query, key, value, dropout_p):
    """Fused SDPA for _sfdp_pattern_7.

    sdpa prefers inputs already permuted; doing the permutes here avoids the
    internal copy sdpa would otherwise make.
    """
    counters["inductor"]["fuse_attention"] += 1
    q, k, v = (t.permute(0, 2, 1, 3) for t in (query, key, value))
    return aten.scaled_dot_product_attention(
        q,
        k,
        v,
        attn_mask=None,  # attn_mask,
        dropout_p=dropout_p,
        is_causal=False,
    )
|
| 178 |
+
|
| 179 |
+
|
| 180 |
+
def _sfdp_pattern_8(query, key, value):
|
| 181 |
+
# no dropout version of pattern 7
|
| 182 |
+
q = query.permute(0, 2, 1, 3)
|
| 183 |
+
k = key.permute(0, 2, 1, 3)
|
| 184 |
+
v = value.permute(0, 2, 1, 3)
|
| 185 |
+
div = q @ k.transpose(-2, -1) / math.sqrt(q.size(-1))
|
| 186 |
+
div = div.to(torch.float32)
|
| 187 |
+
attn_weight = torch.softmax(div, dim=-1)
|
| 188 |
+
attn_weight = attn_weight.to(torch.float16)
|
| 189 |
+
return attn_weight @ v
|
| 190 |
+
|
| 191 |
+
|
| 192 |
+
def _sfdp_replacement_8(query, key, value):
    """Fused SDPA for _sfdp_pattern_8 (no dropout)."""
    counters["inductor"]["fuse_attention"] += 1
    q, k, v = (t.permute(0, 2, 1, 3) for t in (query, key, value))
    return aten.scaled_dot_product_attention(
        q,
        k,
        v,
        attn_mask=None,  # attn_mask,
        dropout_p=0.0,
        is_causal=False,
    )
|
| 205 |
+
|
| 206 |
+
|
| 207 |
+
def _sfdp_pattern_9(query, key, value, dropout_p):
|
| 208 |
+
q = query.permute(0, 2, 1, 3)
|
| 209 |
+
k = key.permute(0, 2, 1, 3)
|
| 210 |
+
v = value.permute(0, 2, 1, 3)
|
| 211 |
+
q = q / math.sqrt(q.size(-1))
|
| 212 |
+
div = q @ k.transpose(-2, -1)
|
| 213 |
+
div = div.to(torch.float32)
|
| 214 |
+
attn_weight = torch.softmax(div, dim=-1)
|
| 215 |
+
attn_weight = torch.dropout(attn_weight, dropout_p, True)
|
| 216 |
+
attn_weight = attn_weight.to(torch.float16)
|
| 217 |
+
return attn_weight @ v
|
| 218 |
+
|
| 219 |
+
|
| 220 |
+
def _sfdp_replacement_9(query, key, value, dropout_p):
    """Fused SDPA for _sfdp_pattern_9 (default 1/sqrt(d) scaling matches the pattern)."""
    counters["inductor"]["fuse_attention"] += 1
    q, k, v = (t.permute(0, 2, 1, 3) for t in (query, key, value))
    return aten.scaled_dot_product_attention(
        q,
        k,
        v,
        attn_mask=None,  # attn_mask,
        dropout_p=dropout_p,
        is_causal=False,
    )
|
| 233 |
+
|
| 234 |
+
|
| 235 |
+
def _sfdp_pattern_10(query, key, value):
|
| 236 |
+
# no dropout version of 9
|
| 237 |
+
q = query.permute(0, 2, 1, 3)
|
| 238 |
+
k = key.permute(0, 2, 1, 3)
|
| 239 |
+
v = value.permute(0, 2, 1, 3)
|
| 240 |
+
q = q / math.sqrt(q.size(-1))
|
| 241 |
+
div = q @ k.transpose(-2, -1)
|
| 242 |
+
div = div.to(torch.float32)
|
| 243 |
+
attn_weight = torch.softmax(div, dim=-1)
|
| 244 |
+
attn_weight = attn_weight.to(torch.float16)
|
| 245 |
+
return attn_weight @ v
|
| 246 |
+
|
| 247 |
+
|
| 248 |
+
def _sfdp_replacement_10(query, key, value):
    """Fused SDPA for _sfdp_pattern_10 (no dropout, default scaling)."""
    counters["inductor"]["fuse_attention"] += 1
    q, k, v = (t.permute(0, 2, 1, 3) for t in (query, key, value))
    return aten.scaled_dot_product_attention(
        q,
        k,
        v,
        attn_mask=None,  # attn_mask,
        dropout_p=0.0,
        is_causal=False,
    )
|
| 261 |
+
|
| 262 |
+
|
| 263 |
+
def _sfdp_pattern_11(query, key, value, inv_scale):
    """Search pattern: scores divided by `inv_scale` (no mask, no dropout)."""
    # Mainly for huggingface models
    q = query.permute(0, 2, 1, 3)
    k = key.permute(0, 2, 1, 3)
    v = value.permute(0, 2, 1, 3)
    return torch.matmul(q, k.transpose(-2, -1)).div(inv_scale).softmax(dim=-1).matmul(v)
def _sfdp_replacement_11(query, key, value, inv_scale):
    """Replacement for `_sfdp_pattern_11`; div-by-inv_scale becomes
    scale=1/inv_scale on the fused kernel."""
    counters["inductor"]["fuse_attention"] += 1
    return aten.scaled_dot_product_attention(
        query.transpose(1, 2),
        key.transpose(1, 2),
        value.transpose(1, 2),
        attn_mask=None,
        dropout_p=0.0,
        is_causal=False,
        scale=1.0 / inv_scale,
    )
def _sfdp_pattern_12(query, key, value, inv_scale_factor, dropout_p):
    """Search pattern: `_sfdp_pattern_11` plus dropout on the attention
    weights."""
    q = query.permute(0, 2, 1, 3)
    k = key.permute(0, 2, 1, 3)
    v = value.permute(0, 2, 1, 3)
    return torch.nn.functional.dropout(
        torch.matmul(q, k.transpose(-2, -1)).div(inv_scale_factor).softmax(dim=-1),
        p=dropout_p,
    ).matmul(v)
def _sfdp_replacement_12(query, key, value, inv_scale_factor, dropout_p):
    """Replacement for `_sfdp_pattern_12`: fused SDPA carrying both the
    dropout probability and scale=1/inv_scale_factor."""
    counters["inductor"]["fuse_attention"] += 1
    return aten.scaled_dot_product_attention(
        query.transpose(1, 2),
        key.transpose(1, 2),
        value.transpose(1, 2),
        attn_mask=None,
        dropout_p=dropout_p,
        is_causal=False,
        scale=1.0 / inv_scale_factor,
    )
def _sfdp_pattern_13(query, key, value, dropout_p):
    """Search pattern: 3-D (batched-matmul) attention with dropout and no
    explicit scaling."""
    attn_weight = torch.bmm(query, key.transpose(1, 2)).softmax(dim=-1)
    attn_weight = torch.nn.functional.dropout(attn_weight, p=dropout_p)
    return torch.bmm(attn_weight, value)
def _sfdp_replacement_13(query, key, value, dropout_p):
    """Replacement for `_sfdp_pattern_13`: unsqueeze the 3-D inputs to 4-D
    for the fused kernel, then squeeze back.  scale=1.0 because the pattern
    applies no scaling."""
    counters["inductor"]["fuse_attention"] += 1
    return aten.scaled_dot_product_attention(
        query.unsqueeze(0),
        key.unsqueeze(0),
        value.unsqueeze(0),
        dropout_p=dropout_p,
        scale=1.0,
    ).squeeze(0)
def _sfdp_pattern_14(query, key, value, attn_mask, inv_scale):
    """Search pattern: scaled scores plus an additive attention mask."""
    # for BertLarge
    # Permutations are needed to create clones in graph.
    q = query.permute([0, 2, 1, 3])
    k = key.permute([0, 2, 1, 3])
    v = value.permute([0, 2, 1, 3])
    return (
        (torch.matmul(q, k.transpose(-2, -1)).div(inv_scale) + attn_mask)
        .softmax(dim=-1)
        .matmul(v)
    )
def _sfdp_replacement_14(query, key, value, attn_mask, inv_scale):
    """Replacement for `_sfdp_pattern_14`: additive mask passed straight to
    the fused kernel (cast to the query dtype)."""
    counters["inductor"]["fuse_attention"] += 1
    return aten.scaled_dot_product_attention(
        query.transpose(1, 2),
        key.transpose(1, 2),
        value.transpose(1, 2),
        attn_mask=attn_mask.to(dtype=query.dtype),
        dropout_p=0.0,
        is_causal=False,
        scale=1.0 / inv_scale,
    )
def _sfdp_pattern_15(query, key, value, attn_mask, inv_scale):
    """Search pattern: boolean padding mask applied via masked_fill(-inf)
    before softmax."""
    # for DistilBert
    # Permutations are needed to create clones in graph.
    q = query.permute([0, 2, 1, 3])
    k = key.permute([0, 2, 1, 3])
    v = value.permute([0, 2, 1, 3])
    bs = q.size(0)
    k_len = k.size(-2)
    scores = q @ k.transpose(-2, -1)
    scores = scores.div(inv_scale)
    fill_value = torch.full((), -float("inf"), dtype=query.dtype, device=query.device)
    # Positions where the mask is 0 are filled with -inf (masked out).
    attn_mask = (attn_mask == 0).view((bs, 1, 1, k_len)).expand_as(scores)
    return torch.softmax(scores.masked_fill(attn_mask, fill_value), dim=-1) @ v
def _sfdp_replacement_15(query, key, value, attn_mask, inv_scale):
    """Replacement for `_sfdp_pattern_15`.

    The pattern masks where `attn_mask == 0`; SDPA's boolean mask has the
    opposite convention (True = attend), so the mask is inverted here by
    testing `== 1` instead.
    """
    counters["inductor"]["fuse_attention"] += 1
    bs = query.size(0)
    n_head = query.size(2)
    q_len = query.size(1)
    k_len = key.size(1)
    # do attn_mask->logical_not() in aten.scaled_dot_product_attention
    attn_mask = (
        (attn_mask == 1).view((bs, 1, 1, k_len)).expand((bs, n_head, q_len, k_len))
    )
    return aten.scaled_dot_product_attention(
        query.transpose(1, 2),
        key.transpose(1, 2),
        value.transpose(1, 2),
        attn_mask=attn_mask.to(dtype=torch.bool),
        dropout_p=0.0,
        is_causal=False,
        scale=1.0 / inv_scale,
    )
def _sfdp_pattern_16(query, key, value, attn_mask, inv_scale, dropout_p):
    """Search pattern: `_sfdp_pattern_14` plus dropout, with the dropout
    output cast back to the query dtype."""
    # for BertLarge with dropout
    q = query.permute([0, 2, 1, 3])
    k = key.permute([0, 2, 1, 3])
    v = value.permute([0, 2, 1, 3])
    return (
        torch.nn.functional.dropout(
            (torch.matmul(q, k.transpose(-2, -1)).div(inv_scale) + attn_mask).softmax(
                dim=-1
            ),
            dropout_p,
        )
        .to(dtype=query.dtype)
        .matmul(v)
    )
def _sfdp_replacement_16(query, key, value, attn_mask, inv_scale, dropout_p):
    """Replacement for `_sfdp_pattern_16`: fused SDPA with additive mask and
    dropout."""
    counters["inductor"]["fuse_attention"] += 1
    return aten.scaled_dot_product_attention(
        query.transpose(1, 2),
        key.transpose(1, 2),
        value.transpose(1, 2),
        attn_mask=attn_mask.to(dtype=query.dtype),
        dropout_p=dropout_p,
        is_causal=False,
        scale=1.0 / inv_scale,
    )
def _sfdp_pattern_17(query, key, value, attn_mask, inv_scale, dropout_p):
    """Search pattern: `_sfdp_pattern_15` (masked_fill padding mask) plus
    dropout on the attention weights."""
    # for DistilBert with dropout
    q = query.permute([0, 2, 1, 3])
    k = key.permute([0, 2, 1, 3])
    v = value.permute([0, 2, 1, 3])
    bs = q.size(0)
    k_len = k.size(-2)
    scores = q @ k.transpose(-2, -1)
    scores = scores.div(inv_scale)
    fill_value = torch.full((), -float("inf"), dtype=query.dtype, device=query.device)
    attn_mask = (attn_mask == 0).view((bs, 1, 1, k_len)).expand_as(scores)
    return (
        torch.nn.functional.dropout(
            torch.softmax(scores.masked_fill(attn_mask, fill_value), dim=-1), dropout_p
        )
        @ v
    )
def _sfdp_replacement_17(query, key, value, attn_mask, inv_scale, dropout_p):
    """Replacement for `_sfdp_pattern_17`.

    Mask convention is inverted (`== 1` means attend) because SDPA's boolean
    mask keeps positions where the mask is True, whereas the pattern fills
    positions where the mask is 0.
    """
    counters["inductor"]["fuse_attention"] += 1
    bs = query.size(0)
    n_head = query.size(2)
    q_len = query.size(1)
    k_len = key.size(1)
    # do attn_mask->logical_not() in aten.scaled_dot_product_attention
    attn_mask = (
        (attn_mask == 1).view((bs, 1, 1, k_len)).expand((bs, n_head, q_len, k_len))
    )
    return aten.scaled_dot_product_attention(
        query.transpose(1, 2),
        key.transpose(1, 2),
        value.transpose(1, 2),
        attn_mask=attn_mask.to(dtype=torch.bool),
        dropout_p=dropout_p,
        is_causal=False,
        scale=1.0 / inv_scale,
    )
def _sfdp_params_check(match):
    """Extra validation run after a candidate SDPA pattern match.

    Returns False (rejecting the match) when q/k/v dtypes or devices
    disagree, or when an additive attention mask exists but is not a tensor
    of an SDPA-compatible dtype on the same device as the query.
    """
    assert all(k in match.kwargs for k in ("query", "key", "value"))
    # `.meta["val"]` holds the FakeTensor recorded for each matched FX node.
    query = match.kwargs["query"].meta["val"]
    key = match.kwargs["key"].meta["val"]
    value = match.kwargs["value"].meta["val"]
    if not (query.dtype == key.dtype == value.dtype) or not (
        query.device == key.device == value.device
    ):
        return False
    add_mask_node = filter_nodes(match.nodes, aten.add.Tensor)
    # Has attn_mask add.
    if len(add_mask_node) > 0:
        attn_mask_node = add_mask_node[0].args[1]
        # attn_mask_node may be a float/int number.
        if not hasattr(attn_mask_node, "meta"):
            return False
        attn_mask = attn_mask_node.meta["val"]  # type: ignore[union-attr]
        # Make sure attn_mask.dtype == query.dtype or attn_mask.dtype == torch.bool
        # attn_mask.dtype == torch.float for models like albert.
        if (
            not isinstance(attn_mask, torch.Tensor)
            or not (
                attn_mask.dtype == query.dtype
                or attn_mask.dtype == torch.bool
                or attn_mask.dtype == torch.float
            )
            or query.device != attn_mask.device
        ):
            return False
    return True
def _sfdp_extra_check(scale_factor_op, disable_cuda=False):
    """Build an extra-check callback for patterns that carry a scale factor.

    The returned function rejects a match when the scale factor is not a
    plain float/int, when `disable_cuda` is set and the query lives on a
    CUDA device, or when `_sfdp_params_check` fails.
    """

    def fn(match):
        scale_factor_node = filter_nodes(match.nodes, scale_factor_op)[0]
        # Note: args[1] of the scale_factor_node is always the scale_factor for the current patterns.
        scale_factor = scale_factor_node.args[1]
        # make sure the scale_factor a float/int. SymInt?
        if not isinstance(scale_factor, (float, int)):
            return False
        if (
            disable_cuda
            and "query" in match.kwargs
            and "cuda" in str(match.kwargs["query"].meta["val"].device)
        ):
            return False
        return _sfdp_params_check(match)

    return fn
def partialize_and_update_signature(func, **kwargs):
    """Bind **kwargs into *func* (like functools.partial) and return a plain
    function whose ``__signature__`` no longer lists the bound parameters.

    A real function (not a ``partial`` object) is required because downstream
    code inspects the signature of pattern/replacement callables.  Unlike the
    previous version, ``functools.wraps`` is used so ``__doc__``,
    ``__module__`` and ``__qualname__`` survive alongside ``__name__``.
    """
    original_sig = inspect.signature(func)
    parameters = original_sig.parameters

    # Drop every parameter that is being bound; the rest keep their order.
    new_parameters = {
        key: value for key, value in parameters.items() if key not in kwargs
    }
    new_sig = inspect.Signature(parameters=list(new_parameters.values()))

    partial_func = functools.partial(func, **kwargs)

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        return partial_func(*args, **kwargs)

    # Explicit __signature__ wins over the __wrapped__ set by wraps, so
    # inspect.signature(wrapper) reports the reduced parameter list.
    wrapper.__signature__ = new_sig  # type: ignore[attr-defined]

    return wrapper
def _get_sfdp_patterns():
    """Yield ``(name, register_replacement_kwargs)`` for every SDPA fusion
    pattern, in both training (joint fwd+bwd trace) and inference (fwd-only)
    variants, for fp32 and fp16 example inputs.

    The names produced here must match the serialized pattern files (see
    `gen_attention_patterns`), so the naming scheme below must stay stable.
    """
    from .joint_graph import patterns

    if torch.cuda.is_available():
        # workaround https://github.com/pytorch/pytorch/issues/97894
        device = "cuda"
    else:
        device = "cpu"

    # sizes/values don't actually matter for initial trace
    # once we get a possible match we re-trace with the actual values and verify the match still holds
    g_inp = functools.partial(
        torch.empty, (2, 4, 8, 16), device=device, requires_grad=True
    )
    # attn_mask
    b_inp = functools.partial(torch.empty, (1, 1, 8, 8), device=device)
    m_inp = functools.partial(torch.empty, (2, 1, 1, 4), device=device)
    # inv_scale
    c_inp = functools.partial(torch.tensor, 2.0, device=device)
    # workaround https://github.com/pytorch/pytorch/issues/97894
    # 0.113377 is a "magic" value that lets us recover the lost input arg relationship
    d = {"dropout_p": 0.113377}

    # we could also generate all these patterns in 3d.. TODO
    g_3d_inp = functools.partial(
        torch.empty, (1024, 128, 128), device=device, requires_grad=True
    )

    # reshape in matmul decomposition generates a clone when batch_size>1 due to the memory layout change.
    # however when batch_size=1, reshape does not change the memory layout, so clone would not be generated.
    # here we need to trace with input of batch_size=1 to generate a pattern graph without clone.
    g_bs1_inp = functools.partial(
        torch.empty, (1, 4, 8, 16), device=device, requires_grad=True
    )
    m_bs1_inp = functools.partial(torch.empty, (1, 1, 1, 4), device=device)

    # softmax will generate a dtype conversion on inputs if they are in half,
    # but will not in float, so we generate a pattern for both
    for dtype in [torch.float, torch.half]:
        g = functools.partial(g_inp, dtype=dtype)
        b = functools.partial(b_inp, dtype=dtype)
        m = functools.partial(m_inp, dtype=dtype)
        m_float = functools.partial(m_inp, dtype=torch.float)
        c = functools.partial(c_inp, dtype=dtype)
        g_3d = functools.partial(g_3d_inp, dtype=dtype)
        g_bs1 = functools.partial(g_bs1_inp, dtype=dtype)
        m_bs1 = functools.partial(m_bs1_inp, dtype=dtype)
        m_bs1_float = functools.partial(m_bs1_inp, dtype=torch.float)

        # Each candidate: (search_fn, replace_fn, example_inputs,
        # scalar_workaround dict, extra_check).
        candidates = [
            (
                _sfdp_pattern_1,
                _sfdp_replacement_1,
                [g(), g(), g(), c()],
                {},
                _sfdp_extra_check(aten.div.Tensor),
            ),
            (
                _sfdp_pattern_2,
                _sfdp_replacement_2,
                [g(), g(), g(), c()],
                {},
                _sfdp_extra_check(aten.mul.Tensor),
            ),
            (
                _sfdp_pattern_3,
                _sfdp_replacement_3,
                [g(), g(), g(), c()],
                d,
                _sfdp_extra_check(aten.div.Tensor),
            ),
            (
                _sfdp_pattern_4,
                _sfdp_replacement_4,
                [g(), g(), g(), c()],
                d,
                _sfdp_extra_check(aten.mul.Tensor),
            ),
            (
                _sfdp_pattern_5,
                _sfdp_replacement_5,
                [g(), g(), g(), b()],
                {},
                _sfdp_params_check,
            ),
            (
                _sfdp_pattern_6,
                _sfdp_replacement_6,
                [g(), g(), g(), b()],
                d,
                _sfdp_params_check,
            ),
            (
                _sfdp_pattern_7,
                _sfdp_replacement_7,
                [g(), g(), g()],
                d,
                _sfdp_params_check,
            ),
            (
                _sfdp_pattern_8,
                _sfdp_replacement_8,
                [g(), g(), g()],
                {},
                _sfdp_params_check,
            ),
            (
                _sfdp_pattern_9,
                _sfdp_replacement_9,
                [g(), g(), g()],
                d,
                _sfdp_params_check,
            ),
            (
                _sfdp_pattern_10,
                _sfdp_replacement_10,
                [g(), g(), g()],
                {},
                _sfdp_params_check,
            ),
            (
                _sfdp_pattern_11,
                _sfdp_replacement_11,
                [g(), g(), g(), c()],
                {},
                _sfdp_extra_check(aten.div.Tensor),
            ),
            (
                _sfdp_pattern_12,
                _sfdp_replacement_12,
                [g(), g(), g(), c()],
                d,
                _sfdp_extra_check(aten.div.Tensor),
            ),
            (
                _sfdp_pattern_13,
                _sfdp_replacement_13,
                [g_3d(), g_3d(), g_3d()],
                d,
                _sfdp_params_check,
            ),
            (
                _sfdp_pattern_14,
                _sfdp_replacement_14,
                [g(), g(), g(), m(), c()],
                {},
                _sfdp_extra_check(aten.div.Tensor),
            ),
            (
                _sfdp_pattern_15,
                _sfdp_replacement_15,
                [g(), g(), g(), m(), c()],
                {},
                _sfdp_extra_check(aten.div.Tensor),
            ),
            # TODO: Enable CUDA after solving Bert accuracy issue of calling efficient attention
            (
                _sfdp_pattern_16,
                _sfdp_replacement_16,
                [g(), g(), g(), m(), c()],
                d,
                _sfdp_extra_check(aten.div.Tensor, disable_cuda=True),
            ),
            (
                _sfdp_pattern_16,
                _sfdp_replacement_16,
                [g_bs1(), g_bs1(), g_bs1(), m_bs1(), c()],
                d,
                _sfdp_extra_check(aten.div.Tensor, disable_cuda=True),
            ),
            (
                _sfdp_pattern_17,
                _sfdp_replacement_17,
                [g(), g(), g(), m(), c()],
                d,
                _sfdp_extra_check(aten.div.Tensor),
            ),
        ]
        # Patterns that get an extra fp32-mask variant below.
        mask_fp32_patterns = ["pattern_16"]
        if dtype == torch.half:
            # Add inputs of fp16 q/k/v and fp32 mask, for models like albert.
            candidates.append(
                (
                    _sfdp_pattern_16,
                    _sfdp_replacement_16,
                    [g(), g(), g(), m_float(), c()],
                    d,
                    _sfdp_extra_check(aten.div.Tensor, disable_cuda=True),
                )
            )
            candidates.append(
                (
                    _sfdp_pattern_16,
                    _sfdp_replacement_16,
                    [g_bs1(), g_bs1(), g_bs1(), m_bs1_float(), c()],
                    d,
                    _sfdp_extra_check(aten.div.Tensor, disable_cuda=True),
                )
            )

        for pattern, replacement, args, workaround, extra_check in candidates:
            # XXX: when adding a new pattern, re-run `gen_attention_patterns` so the pattern
            # gets serialized to a python file and does not require tracing at runtime.
            assert isinstance(workaround, dict)
            name = pattern.__name__

            # Name suffixes encode the input variant so serialized patterns
            # for each combination can be looked up unambiguously.
            if dtype != torch.float:
                name += "_half"
                if (
                    any(p in name for p in mask_fp32_patterns)
                    and args[3].dtype == torch.float32
                ):
                    name += "_mask_fp32"
            if args[0].size(0) == 1:
                name += "_bs1"

            training_name = name + "_training"
            yield training_name, {
                "search_fn": pattern,
                "replace_fn": replacement,
                "example_inputs": args,
                "trace_fn": joint_fwd_bwd,
                "pass_dicts": patterns,
                "extra_check": extra_check,
                "scalar_workaround": workaround,
            }

            if workaround:
                assert len(workaround) == 1 and "dropout_p" in workaround
                # functools.partial insufficient because we look at signature downstream
                pattern = partialize_and_update_signature(pattern, dropout_p=0.0)
                replacement = partialize_and_update_signature(
                    replacement, dropout_p=0.0
                )
                workaround = {}

            inference_name = name + "_inference"
            yield inference_name, {
                "search_fn": pattern,
                "replace_fn": replacement,
                "example_inputs": args,
                "trace_fn": fwd_only,
                "pass_dicts": patterns,
                "extra_check": extra_check,
                "scalar_workaround": workaround,
            }
@functools.lru_cache(None)
def _sfdp_init():
    """Register every SDPA fusion pattern exactly once (lru_cache makes this
    idempotent), using pre-serialized pattern graphs when available."""
    from .serialized_patterns.central_index import get_serialized_pattern

    for key, register_replacement_kwargs in _get_sfdp_patterns():
        # May be None if no serialized pattern exists for this key, in which
        # case register_replacement falls back to tracing at runtime.
        search_fn_pattern = get_serialized_pattern(key)
        register_replacement(
            **register_replacement_kwargs, search_fn_pattern=search_fn_pattern
        )
vila/lib/python3.10/site-packages/torch/_inductor/fx_passes/pre_grad.py
ADDED
|
@@ -0,0 +1,611 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import copy
import logging
from typing import List, Optional

import torch
import torch.nn as nn
from torch._dynamo.utils import counters, detect_fake_mode, optimus_scuba_log
from torch._utils_internal import upload_graph
from torch.fx.experimental.optimization import (
    matches_module_pattern,
    replace_node_module,
)
from torch.fx.passes.shape_prop import ShapeProp
from torch.nn import functional as F
from torch.nn.utils.fusion import fuse_conv_bn_eval, fuse_conv_bn_weights

from .. import config

from ..fx_utils import matches_module_function_pattern
from ..pattern_matcher import (
    init_once_fakemode,
    PatternMatcherPass,
    stable_topological_sort,
)
from ..utils import is_cpu_device, pass_execution_and_save
from .group_batch_fusion import group_batch_fusion_passes
from .misc_patterns import numpy_compat_normalization

log = logging.getLogger(__name__)

# Pattern-matcher pass instances operating on Torch IR (pre-grad).  Each is
# populated lazily by the modules imported in lazy_init().
normalization_pass = PatternMatcherPass(
    prevent_match_across_mutations=True, pass_name="normalization_pass"
)
merge_splits_pass = PatternMatcherPass(
    prevent_match_across_mutations=True, pass_name="merge_splits_pass"
)
split_cat_pass = PatternMatcherPass(
    prevent_match_across_mutations=True, pass_name="split_cat_pass"
)
unbind_stack_pass = PatternMatcherPass(
    prevent_match_across_mutations=True, pass_name="unbind_stack_pass"
)
efficient_conv_bn_eval_pass = PatternMatcherPass(
    prevent_match_across_mutations=True, pass_name="efficient_conv_bn_eval_pass"
)
merge_getitem_cat_pass = PatternMatcherPass(
    prevent_match_across_mutations=True, pass_name="merge_getitem_cat_pass"
)

fuse_split_linear_add_pass = PatternMatcherPass(
    prevent_match_across_mutations=True,
    pass_name="fuse_split_linear_add_pass",
)
fuse_chunk_squeeze_cat_pass = PatternMatcherPass(
    prevent_match_across_mutations=True,
    pass_name="fuse_chunk_squeeze_cat_pass",
)
remove_reshape_pass = PatternMatcherPass(
    prevent_match_across_mutations=True,
    pass_name="remove_reshape_pass",
)

# based on predispatch aten IR
normalization_pass_aten = PatternMatcherPass(prevent_match_across_mutations=True)
merge_splits_pass_aten = PatternMatcherPass(prevent_match_across_mutations=True)
split_cat_pass_aten = PatternMatcherPass(prevent_match_across_mutations=True)
unbind_stack_pass_aten = PatternMatcherPass(prevent_match_across_mutations=True)
merge_getitem_cat_pass_aten = PatternMatcherPass(prevent_match_across_mutations=True)
def fuse_parallel_linear_pass(graph):
    """Stub pass: intentionally a no-op in this build; always returns None."""
    return None
def remove_split_ops(graph, shape_prop):
    """Stub pass: intentionally a no-op in this build; always returns None."""
    return None
# Ordered pipelines: each pass runs over the whole graph before the next.
# Torch-IR (non-predispatch) pipeline:
pattern_matcher_passes: List[PatternMatcherPass] = [
    normalization_pass,
    merge_getitem_cat_pass,
    merge_splits_pass,
    split_cat_pass,
    unbind_stack_pass,
    efficient_conv_bn_eval_pass,
]
# Predispatch aten-IR pipeline (used when config.is_predispatch):
pattern_matcher_passes_aten: List[PatternMatcherPass] = [
    merge_getitem_cat_pass_aten,
    merge_splits_pass_aten,
    split_cat_pass_aten,
    unbind_stack_pass_aten,
]
@init_once_fakemode
def lazy_init():
    """Populate the pattern-matcher passes on first use.

    Importing these modules registers their patterns into the pass objects
    defined above; the decorator ensures this runs once, under fake-tensor
    mode.
    """
    from . import efficient_conv_bn_eval, split_cat  # noqa: F401

    if config.is_fbcode():
        from . import fb  # type: ignore[attr-defined] # noqa: F401
def pre_grad_passes(gm: torch.fx.GraphModule, example_inputs=None):
    """
    Apply passes on the input FX graph using Torch IR.

    WARNING:
    The IR before grad is not functional or normalized, so it is harder
    to write passes on this IR. Passes must be safe with respect to
    aliasing and mutation and need to handle all possible arg schemas.

    Consider adding a new pass to post_grad.py or joint_graph.py which
    are after functionalization and normalization.
    """
    if config.pattern_matcher:
        lazy_init()
        # Snapshot the graph before any pass when numeric checking is on, so
        # it can be compared against the final graph at the bottom.
        if hasattr(
            config, "fx_passes_numeric_check"
        ) and config.fx_passes_numeric_check.get("pre_grad", False):
            gm_before_fx_passes = gm.__copy__()
        # explicitly run with predispatch atenIR based passes
        if config.is_predispatch:

            def shape_prop(mod) -> None:
                # Re-propagate shapes after graph surgery; uses the fake mode
                # detected from the example inputs.
                ShapeProp(
                    gm=mod,
                    fake_mode=detect_fake_mode(example_inputs),
                ).propagate(*example_inputs)

            # normalization pass
            pass_execution_and_save(
                normalization_pass_aten.apply,
                gm,
                "[Pre grad(predispatch IR)]Apply normalization pass",
            )
            pass_execution_and_save(
                group_batch_fusion_passes,
                gm,
                "[Pre grad(predispatch IR)] Apply group_batch_fusion",
            )
            pass_execution_and_save(
                fuse_chunk_squeeze_cat_pass.apply,
                gm,
                "[Pre grad(predispatch IR)] Apply fuse_chunk_squeeze_cat_pass",
            )
            pass_execution_and_save(
                fuse_split_linear_add_pass.apply,
                gm,
                "[Pre grad(predispatch IR)] Apply fuse_split_linear_add_pass",
            )

            log.debug(
                "[Pre grad(predispatch IR)]Before split cat in pre grad pass. graph: %s",
                gm.graph,
            )
            for ind, pattern_matcher_pass_aten in enumerate(
                pattern_matcher_passes_aten
            ):
                pass_execution_and_save(
                    pattern_matcher_pass_aten.apply,
                    gm,
                    f"[Pre grad(predispatch IR)]Apply split_cat, index: {ind}",
                )
            pass_execution_and_save(
                remove_reshape_pass.apply,
                gm,
                "[Pre grad(predispatch IR)] Apply remove_reshape_pass",
            )
            pass_execution_and_save(
                fuse_parallel_linear_pass,
                gm,
                "[Pre grad(predispatch IR)] Apply fuse_parallel_linear_pass",
            )
            pass_execution_and_save(
                lambda graph: remove_split_ops(graph.owning_module, shape_prop),
                gm,
                "[Pre grad(predispatch IR)] Apply remove_split_ops",
            )
            # Final shape propagation so downstream passes see fresh metadata.
            shape_prop(gm)

        else:
            # We only log the graph with changes to avoid the excessive compilation time
            # https://fb.workplace.com/groups/257735836456307/permalink/633533465543207/
            if example_inputs is not None:
                gm = fuse_fx(gm, example_inputs)
            numpy_compat_normalization(gm.graph)
            inductor_before_change = copy.deepcopy(counters["inductor"])
            group_batch_fusion_passes(gm.graph, pre_grad=True)
            if counters["inductor"] != inductor_before_change:
                optimus_scuba_log["group_batch_fusion_pre_grad"] = upload_graph(
                    gm.graph
                )
            for pattern_matcher_pass in pattern_matcher_passes:
                # Counter diff detects whether the pass changed anything; only
                # changed graphs get uploaded for logging.
                inductor_before_change = copy.deepcopy(counters["inductor"])
                pattern_matcher_pass.apply(gm.graph)  # type: ignore[arg-type]
                if counters["inductor"] != inductor_before_change:
                    optimus_scuba_log[
                        f"split_cat_pattern_{pattern_matcher_pass.pass_name}_pre_grad"
                    ] = upload_graph(gm.graph)

    if config.pre_grad_custom_pass is not None:
        config.pre_grad_custom_pass(gm.graph)
    stable_topological_sort(gm.graph)
    gm.graph.lint()
    gm.recompile()

    if (
        config.pattern_matcher
        and hasattr(config, "fx_passes_numeric_check")
        and config.fx_passes_numeric_check.get("pre_grad", False)
        and example_inputs is not None
    ):
        from .numeric_utils import numeric_check_if_enabled

        gm_after_fx_passes = gm.__copy__()
        numeric_check_if_enabled(
            gm_before_fx_passes,  # type: ignore[possibly-undefined]
            gm_after_fx_passes,
            example_inputs,
            config.fx_passes_numeric_check.get("num_iterations", 1),
            config.fx_passes_numeric_check.get("precision", 1e-4),
        )

    return gm
def fuse_fx(gm: torch.fx.GraphModule, example_inputs) -> torch.fx.GraphModule:
    """Apply a small set of pre-grad FX fusions.

    Always sinks ``cat`` past pointwise ops; on non-CPU devices (when enabled
    in config) fuses linear/permute and permute/matmul patterns; for CPU
    inference with freezing, additionally removes Identity modules and folds
    BatchNorm into Conv.

    Args:
        gm: graph module to rewrite in place (also returned).
        example_inputs: sample inputs used for device detection and shape
            propagation.
    """
    is_cpu = is_cpu_device(example_inputs)

    fake_mode = detect_fake_mode(example_inputs)

    gm = sink_cat_after_pointwise(gm)
    if config.permute_fusion and not is_cpu:
        # For linear permute fusion, we need to check input info to identify
        # and perform proper permutation/transpose
        ShapeProp(gm, fake_mode=fake_mode).propagate(*example_inputs)
        gm = linear_permute_fusion(gm)
        gm = permute_linear_fusion(gm)
        gm = permute_matmul_fusion(gm)

    # make sure the autograd is disabled.
    if torch.is_grad_enabled() or not is_cpu:
        return gm
    if config.freezing:
        # Inference-only rewrites: these change module parameters, so they
        # are gated on freezing.
        gm = remove_identity(gm)
        gm = fuse_conv_bn(gm)
    return gm
|
| 249 |
+
|
| 250 |
+
def fetch_attr(target: str, mod):
    """Resolve a dotted attribute path such as ``"a.b.c"`` on *mod*.

    This mirrors how FX node targets reference submodules/buffers.

    Args:
        target: dot-separated attribute path.
        mod: root module (or any object) to resolve against.

    Returns:
        The object found at the end of the path.

    Raises:
        RuntimeError: if any component of the path does not exist.
    """
    target_atoms = target.split(".")
    attr_itr = mod
    for i, atom in enumerate(target_atoms):
        if not hasattr(attr_itr, atom):
            # Include the failing atom in the message (the previous slice
            # ``[:i]`` named only the prefix that *did* resolve) and fix the
            # "nonexistant" typo.
            raise RuntimeError(
                f"Node referenced nonexistent target {'.'.join(target_atoms[: i + 1])}"
            )
        attr_itr = getattr(attr_itr, atom)
    return attr_itr
|
| 261 |
+
|
| 262 |
+
def remove_identity(gm: torch.fx.GraphModule) -> torch.fx.GraphModule:
    """
    Removes all identity layers from the module.
    """

    class IdentityRemover(torch.fx.Transformer):
        def call_module(self, target, args, kwargs):
            # nn.Identity returns its single argument unchanged, so the call
            # can be replaced with the argument itself.
            if isinstance(self.submodules[target], nn.Identity):
                assert len(args) == 1
                return args[0]
            else:
                return super().call_module(target, args, kwargs)

    return IdentityRemover(gm).transform()
|
| 277 |
+
|
| 278 |
+
def fuse_conv_bn(gm: torch.fx.GraphModule, inplace=False) -> torch.fx.GraphModule:
    """
    Fuses Convolution/BN layers for inference purposes.

    Two shapes are handled: (conv module -> BN module) and
    (conv module -> functional F.batch_norm). In both cases the BN statistics
    are folded into the conv's weight/bias and the BN node is removed.
    """
    # (conv module, bn module) pairs where BN is itself a module call.
    modules_patterns = [
        (torch.nn.Conv1d, torch.nn.BatchNorm1d),
        (torch.nn.Conv2d, torch.nn.BatchNorm2d),
        (torch.nn.Conv3d, torch.nn.BatchNorm3d),
    ]
    # (conv module, functional batch_norm) pairs.
    module_function_patterns = [
        (torch.nn.Conv1d, F.batch_norm),
        (torch.nn.Conv2d, F.batch_norm),
        (torch.nn.Conv3d, F.batch_norm),
    ]
    modules = dict(gm.named_modules())
    for pattern in modules_patterns:
        for node in gm.graph.nodes:
            if matches_module_pattern(pattern, node, modules):
                if len(node.args[0].users) > 1:  # Output of conv is used by other nodes
                    continue
                conv = modules[node.args[0].target]
                bn = modules[node.target]
                # Only fuse in eval mode with tracked running stats — the
                # folded weights rely on fixed statistics.
                eval_mode = all(not n.training for n in [conv, bn])
                if not eval_mode:
                    continue
                if not bn.track_running_stats:
                    continue
                fused_conv = fuse_conv_bn_eval(conv, bn)
                replace_node_module(node.args[0], modules, fused_conv)
                # Route all BN users to the (now fused) conv output.
                node.replace_all_uses_with(node.args[0])
                gm.graph.erase_node(node)
    gm.graph.lint()
    for pattern in module_function_patterns:
        for node in gm.graph.nodes:
            if matches_module_function_pattern(pattern, node, modules):
                # TODO: support kwargs.
                if len(node.args) != 8:
                    continue
                conv = modules[node.args[0].target]
                bn_training = node.args[5]
                bn_eps = node.args[7]
                if conv.training or bn_training:
                    continue
                if type(bn_eps) is not float:
                    continue
                # BN stats/affine params must be plain get_attr constants
                # with a single user, otherwise folding them is unsafe.
                bn_args_is_constant = all(
                    n.op == "get_attr" and len(n.users) == 1 for n in node.args[1:5]
                )
                if not bn_args_is_constant:
                    continue
                bn_running_mean = fetch_attr(node.args[1].target, gm)
                bn_running_var = fetch_attr(node.args[2].target, gm)
                bn_weight = fetch_attr(node.args[3].target, gm)
                bn_bias = fetch_attr(node.args[4].target, gm)
                if bn_running_mean is None or bn_running_var is None:
                    continue
                fused_conv = copy.deepcopy(conv)
                fused_conv.weight, fused_conv.bias = fuse_conv_bn_weights(
                    fused_conv.weight,
                    fused_conv.bias,
                    bn_running_mean,
                    bn_running_var,
                    bn_eps,
                    bn_weight,
                    bn_bias,
                )
                replace_node_module(node.args[0], modules, fused_conv)
                node.replace_all_uses_with(node.args[0])
                gm.graph.erase_node(node)
    gm.graph.lint()
    gm.recompile()

    return gm
|
| 352 |
+
|
| 353 |
+
class NormalizedLinearNode:
    """Uniform accessor for ``F.linear`` call_function nodes.

    Hides whether input/weight/bias were supplied positionally or as
    keyword arguments.
    """

    def __init__(self, node: torch.fx.Node) -> None:
        assert node.op == "call_function"
        assert node.target in [torch.nn.functional.linear]
        self.node: torch.fx.Node = node

    def get_input(self) -> torch.fx.Node:
        args = self.node.args
        return args[0] if args else self.node.kwargs["input"]  # type: ignore[return-value]

    def get_weight(self) -> torch.fx.Node:
        args = self.node.args
        return args[1] if len(args) > 1 else self.node.kwargs["weight"]  # type: ignore[return-value]

    def get_bias(self) -> torch.fx.Node:
        args = self.node.args
        if len(args) > 2:
            return args[2]  # type: ignore[return-value]
        # bias is optional: absent keyword means None.
        return self.node.kwargs.get("bias")  # type: ignore[return-value]
|
| 377 |
+
|
| 378 |
+
class NormalizedMatmulNode:
    """Uniform accessor for ``torch.bmm`` / ``torch.matmul`` nodes.

    Hides whether the two operands were supplied positionally or as the
    ``input`` / ``other`` keyword arguments.
    """

    def __init__(self, node: torch.fx.Node) -> None:
        assert node.op == "call_function"
        assert node.target in [torch.bmm, torch.matmul]
        self.node: torch.fx.Node = node

    def get_input(self) -> torch.fx.Node:
        args = self.node.args
        return args[0] if args else self.node.kwargs["input"]  # type: ignore[return-value]

    def get_other(self) -> torch.fx.Node:
        args = self.node.args
        return args[1] if len(args) > 1 else self.node.kwargs["other"]  # type: ignore[return-value]
|
| 396 |
+
|
| 397 |
+
def check_permute(node: torch.fx.Node) -> bool:
    """Return True iff *node* is a permute that swaps exactly the last
    two dimensions (i.e. equivalent to ``transpose(-1, -2)``).

    Requires ``node.meta["tensor_meta"]`` to be populated (ShapeProp).
    """
    rank = len(node.meta["tensor_meta"].shape)
    if len(node.args) > 3:
        # Permutation dims given positionally after self: args[1:rank+1].
        perm = [node.args[axis] % rank for axis in range(1, rank + 1)]  # type: ignore[operator]
    else:
        kw_perm = node.kwargs.get("permutation")
        if kw_perm is None or len(kw_perm) <= 2:  # type: ignore[arg-type]
            return False
        perm = [axis % rank for axis in kw_perm]  # type: ignore[union-attr]
    expected = list(range(rank))
    expected[-2], expected[-1] = rank - 1, rank - 2
    return perm == expected
|
| 414 |
+
|
| 415 |
+
def sink_cat_after_pointwise(module: torch.fx.GraphModule) -> torch.fx.GraphModule:
    """Rewrite ``pointwise(cat(ts))`` — possibly through a chain of
    single-use ``.view`` calls — into ``cat([pointwise(t) for t in ts])``,
    exposing more fusion opportunities on the individual inputs.
    """

    def one_user(node):
        # The sole user of the node, or None if it has zero or many users.
        users = list(node.users)
        return users[0] if len(users) == 1 else None

    def is_view(node):
        view = {"view"}
        return node.op == "call_method" and node.target in view

    def is_pointwise_unary(node):
        # Only a small allowlist of unary pointwise ops is handled.
        pointwise = {torch.relu, torch.tanh, "relu", "tanh"}
        return node.op in {"call_function", "call_method"} and node.target in pointwise

    g = module.graph
    for node in g.nodes:
        if node.op != "call_function" or node.target != torch.cat:
            continue

        # Follow a single-use chain of .view calls after the cat.
        cat_or_view = node
        while True:
            user = one_user(cat_or_view)
            if not user or not is_view(user):
                break
            cat_or_view = user

        if user and is_pointwise_unary(user):
            with g.inserting_before(node):

                def cat_args(tensors, dim=0):
                    # Normalize cat's positional/keyword calling conventions.
                    return tensors, dim

                tensors, dim = cat_args(*node.args, **node.kwargs)
                new_tensors = [
                    g.create_node(user.op, user.target, args=(arg,), kwargs=user.kwargs)
                    for arg in tensors
                ]
                new_cat = g.create_node(
                    "call_function", torch.cat, args=(new_tensors, dim)
                )
                # Users of the pointwise op now read the (viewed) cat, and
                # users of the old cat read the new cat of pointwise results.
                user.replace_all_uses_with(cat_or_view)
                node.replace_all_uses_with(new_cat)
                g.erase_node(user)
                g.erase_node(node)
    g.lint()
    module.recompile()
    return module
|
| 462 |
+
|
| 463 |
+
def linear_permute_fusion(module: torch.fx.GraphModule) -> torch.fx.GraphModule:
    """Fuse ``F.linear(...).permute(..., -1, -2)`` into a single
    ``linear_transpose`` call (see that function for the algebra).
    """
    for node in module.graph.nodes:
        if (
            node.op == "call_method"
            and node.target == "permute"
            and check_permute(node)
        ):
            if len(node.args) > 0:
                input_node = node.args[0]
            else:
                input_node = node.kwargs["input"]
            if (
                input_node.op == "call_function"
                and input_node.target == torch.nn.functional.linear
            ):
                normalized = NormalizedLinearNode(input_node)
                input = normalized.get_input()
                weight = normalized.get_weight()
                bias = normalized.get_bias()
                with module.graph.inserting_before(node):
                    fused_node = module.graph.call_function(
                        linear_transpose, args=(input, weight, bias)
                    )
                    node.replace_all_uses_with(fused_node)
                    module.graph.erase_node(node)
                    # The linear node is dead if the permute was its only user.
                    if len(input_node.users) == 0:
                        module.graph.erase_node(input_node)

    module.graph.lint()
    module.recompile()
    return module
|
| 495 |
+
|
| 496 |
+
# Y1 = X * W^T + bias
|
| 497 |
+
# Y2 = Y1.permute(0, 2, 1)
|
| 498 |
+
# ---->
|
| 499 |
+
# Y2 = (W * X^T + bias.unsqueeze(-1))^T
|
| 500 |
+
def linear_transpose(
    input: torch.Tensor, weight: torch.Tensor, bias: Optional[torch.Tensor]
) -> torch.Tensor:
    """Compute ``F.linear(input, weight, bias)`` with the last two output
    dims swapped, as a single matmul: ``(W @ X^T) + bias.unsqueeze(-1)``.
    """
    out = torch.matmul(weight, input.transpose(-1, -2))
    if bias is not None:
        out = out + bias.unsqueeze(-1)
    return out
|
| 507 |
+
|
| 508 |
+
def permute_linear_fusion(module: torch.fx.GraphModule) -> torch.fx.GraphModule:
    """Fuse ``F.linear(x.permute(..., -1, -2), w, b)`` into a single
    ``transpose_linear`` call (see that function for the algebra).
    """
    for node in module.graph.nodes:
        if node.op == "call_function" and node.target == torch.nn.functional.linear:
            if len(node.args) > 0:
                input_node = node.args[0]
            else:
                input_node = node.kwargs["input"]
            if (
                input_node.op == "call_method"
                and input_node.target == "permute"
                and check_permute(input_node)
            ):
                normalized = NormalizedLinearNode(node)
                # Use the *permute's* input — the permute itself is fused away.
                if len(input_node.args) > 0:
                    input = input_node.args[0]
                else:
                    input = input_node.kwargs["input"]
                weight = normalized.get_weight()
                bias = normalized.get_bias()
                with module.graph.inserting_before(node):
                    fused_node = module.graph.call_function(
                        transpose_linear, args=(input, weight, bias)
                    )
                    node.replace_all_uses_with(fused_node)
                    module.graph.erase_node(node)
                    # Drop the permute node if it became dead.
                    if len(input_node.users) == 0:
                        module.graph.erase_node(input_node)

    module.graph.lint()
    module.recompile()
    return module
|
| 540 |
+
|
| 541 |
+
def permute_matmul_fusion(module: torch.fx.GraphModule) -> torch.fx.GraphModule:
    """Fuse permutes of the last two dims feeding ``torch.bmm`` /
    ``torch.matmul`` into a single ``transpose_matmul`` call that applies
    the transposes inline.
    """
    for node in module.graph.nodes:
        if node.op == "call_function" and (
            node.target == torch.bmm or node.target == torch.matmul
        ):
            normalized = NormalizedMatmulNode(node)
            input_A_node = normalized.get_input()
            input_B_node = normalized.get_other()
            input_A = input_A_node
            input_B = input_B_node
            # Track which operands were produced by a fusable permute.
            Atrans = Btrans = False
            if (
                input_A_node.op == "call_method"
                and input_A_node.target == "permute"
                and check_permute(input_A_node)
            ):
                Atrans = True
                if len(input_A_node.args) > 0:
                    input_A = input_A_node.args[0]  # type: ignore[assignment]
                else:
                    input_A = input_A_node.kwargs["input"]  # type: ignore[assignment]

            if (
                input_B_node.op == "call_method"
                and input_B_node.target == "permute"
                and check_permute(input_B_node)
            ):
                Btrans = True
                if len(input_B_node.args) > 0:
                    input_B = input_B_node.args[0]  # type: ignore[assignment]
                else:
                    input_B = input_B_node.kwargs["input"]  # type: ignore[assignment]

            if Atrans or Btrans:
                with module.graph.inserting_before(node):
                    fused_node = module.graph.call_function(
                        transpose_matmul,
                        args=(input_A, input_B, Atrans, Btrans),
                    )
                node.replace_all_uses_with(fused_node)
                module.graph.erase_node(node)
                # Remove permute nodes that became dead after the fusion.
                if Atrans and len(input_A_node.users) == 0:
                    module.graph.erase_node(input_A_node)
                if Btrans and len(input_B_node.users) == 0:
                    module.graph.erase_node(input_B_node)

    module.graph.lint()
    module.recompile()
    return module
|
| 591 |
+
|
| 592 |
+
# X1 = X.permute(0, 2, 1)
|
| 593 |
+
# Y1 = X1 * W1^T + bias1
|
| 594 |
+
# ---->
|
| 595 |
+
# Y2 = X1.transpose(-1, -2) * W1^T + bias1
|
| 596 |
+
def transpose_linear(
    input: torch.Tensor, weight: torch.Tensor, bias: Optional[torch.Tensor]
) -> torch.Tensor:
    """Compute ``F.linear(input.transpose(-1, -2), weight, bias)`` as a
    single matmul, avoiding the materialized permute.
    """
    out = torch.matmul(input.transpose(-1, -2), weight.t())
    if bias is not None:
        out = out + bias
    return out
|
| 603 |
+
|
| 604 |
+
def transpose_matmul(
    A: torch.Tensor, B: torch.Tensor, Atrans: bool, Btrans: bool
) -> torch.Tensor:
    """``torch.matmul`` with optional transposition of the last two dims
    of each operand, selected by the boolean flags.
    """
    lhs = A.transpose(-1, -2) if Atrans else A
    rhs = B.transpose(-1, -2) if Btrans else B
    return torch.matmul(lhs, rhs)
vila/lib/python3.10/site-packages/torch/_inductor/fx_passes/reinplace.py
ADDED
|
@@ -0,0 +1,537 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import operator
|
| 2 |
+
from collections import defaultdict
|
| 3 |
+
from dataclasses import dataclass
|
| 4 |
+
from typing import Any, Callable, Dict, List, Tuple
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
from torch._higher_order_ops.triton_kernel_wrap import triton_kernel_wrapper_functional
|
| 8 |
+
from torch._inductor import inductor_prims
|
| 9 |
+
from torch._inductor.fx_utils import get_node_storage, is_node_realized
|
| 10 |
+
from torch._inductor.lowering import (
|
| 11 |
+
inplaceable_foreach_ops as inplaceable_foreach_ops_lowerings,
|
| 12 |
+
)
|
| 13 |
+
from torch._inductor.virtualized import V
|
| 14 |
+
from torch.fx.immutable_collections import immutable_dict
|
| 15 |
+
from torch.fx.passes.reinplace import _is_view_op
|
| 16 |
+
from torch.utils import _pytree as pytree
|
| 17 |
+
|
| 18 |
+
aten = torch.ops.aten
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
@dataclass(frozen=True)
class InplaceableOp:
    """Describes how a functional op can be replaced by an in-place variant."""

    # In-place op to call instead (e.g. aten.index_put_.default).
    inplace_op: Callable[..., Any]
    # Index of the argument that the in-place variant mutates.
    mutated_arg: int
    # Optional per-node predicate for extra safety/profitability checks.
    extra_check: Callable[[torch.fx.Node], bool] = lambda node: True
| 27 |
+
|
| 28 |
+
# Maps each *_scatter op to the view op it corresponds to: scatter(inp, src,
# *args) writes src into view(inp, *args).
_SCATTER_OP_TO_VIEW = {
    torch.ops.aten.diagonal_scatter.default: torch.ops.aten.diagonal.default,
    torch.ops.aten.select_scatter.default: torch.ops.aten.select.int,
    torch.ops.aten.slice_scatter.default: torch.ops.aten.slice.Tensor,
    torch.ops.aten.as_strided_scatter.default: torch.ops.aten.as_strided.default,
}
# Inverse mapping: view op -> its scatter counterpart.
_VIEW_OP_TO_SCATTER = {v: k for k, v in _SCATTER_OP_TO_VIEW.items()}
|
| 36 |
+
|
| 37 |
+
def graph_call_function(graph: torch.fx.Graph, fn, *args, **kwargs):
    """Insert a ``call_function`` node for *fn* and populate its ``val`` meta
    by evaluating *fn* on the arguments' fake tensors under the active fake
    mode.
    """
    # Unwrap fx nodes to their fake-tensor values; pass other args through.
    fake_args, fake_kwargs = pytree.tree_map(
        lambda node: node.meta["val"] if isinstance(node, torch.fx.Node) else node,
        (args, kwargs),
    )
    with V.fake_mode:
        fake_result = fn(*fake_args, **fake_kwargs)

    node = graph.call_function(fn, args, kwargs)
    node.meta["val"] = fake_result
    return node
|
| 49 |
+
|
| 50 |
+
@dataclass
class ViewOp:
    """A recorded view call that can be replayed on another tensor."""

    # The aten view overload (e.g. aten.slice.Tensor).
    target: torch._ops.OpOverload
    # View arguments, excluding the input tensor itself.
    args: Tuple[Any, ...]
    kwargs: Dict[str, Any]
|
| 56 |
+
|
| 57 |
+
def _inplace_generalized_scatter(
    inp: torch.Tensor, src: torch.Tensor, view_ops: List[ViewOp]
) -> torch.Tensor:
    """Apply the chain of *view_ops* to *inp* and copy *src* into the
    resulting view, mutating *inp* in place. Returns *inp*.
    """
    tmp = inp
    for view in view_ops:
        # View args may be fx nodes (symbolic sizes etc.); unwrap to their
        # fake values before calling the real view op.
        fake_args, fake_kwargs = pytree.tree_map(
            lambda node: node.meta["val"] if isinstance(node, torch.fx.Node) else node,
            (view.args, view.kwargs),
        )
        tmp = view.target(tmp, *fake_args, **fake_kwargs)
    tmp.copy_(src)
    return inp
|
| 70 |
+
|
| 71 |
+
def _generalized_scatter(
    inp: torch.Tensor, src: torch.Tensor, view_ops: List[ViewOp]
) -> torch.Tensor:
    """Out-of-place scatter: clone *inp*, then scatter *src* into the clone
    through the chain of *view_ops*.
    """
    result = inp.clone()
    return _inplace_generalized_scatter(result, src, view_ops)
|
| 77 |
+
|
| 78 |
+
def _decompose_scatter_functional_helper(
    graph: torch.fx.Graph,
    inp: torch.Tensor,
    src: torch.Tensor,
    view_ops: List[ViewOp],
) -> torch.fx.Node:
    """Recursively emit view + view_scatter nodes for *view_ops*.

    The deepest level scatters *src* into the innermost view; each outer
    level scatters the recursive result back into its own view of *inp*.
    """
    view_op, view_ops_tail = view_ops[0], view_ops[1:]

    if view_ops_tail:
        # Materialize the current view, then build src for this level by
        # scattering into that view recursively.
        view = graph_call_function(
            graph, view_op.target, inp, *view_op.args, **view_op.kwargs
        )
        src = _decompose_scatter_functional_helper(graph, view, src, view_ops[1:])  # type: ignore[assignment]

    return graph_call_function(
        graph,
        _VIEW_OP_TO_SCATTER[view_op.target],
        inp,
        src,
        *view_op.args,
        **view_op.kwargs,
    )
|
| 101 |
+
|
| 102 |
+
def _decompose_scatter_functional(
    graph: torch.fx.Graph, node: torch.fx.Node
) -> torch.fx.Node:
    """Decompose _generalized_scatter to a sequence of view_scatter operations

    e.g. _generalized_scatter(inp, src, [(aten.slice, 0, 0, 10), (aten.slice, 1, 10, -10)])

    will become

    view = aten.slice(inp, 0, 0, 10)
    view_updated = aten.slice_scatter(view, src, 1, 10, -10)
    inp_updated = aten.slice_scatter(inp, view_updated, 0, 0, 10)
    """
    assert node.target is _generalized_scatter
    # node.args is exactly (inp, src, view_ops); the previous dead
    # `inp, src, view_ops = node.args` unpacking was removed.
    return _decompose_scatter_functional_helper(graph, *node.args)  # type: ignore[arg-type]
|
| 119 |
+
|
| 120 |
+
def _decompose_scatter_mutating(
    graph: torch.fx.Graph, node: torch.fx.Node
) -> torch.fx.Node:
    """Decompose _generalized_scatter using mutations

    e.g. _generalized_scatter(inp, src, [(aten.slice, 0, 0, 10), (aten.slice, 1, 10, -10)])

    will become

    inp_updated = aten.clone(inp)
    slice1 = aten.slice(inp_updated, 0, 0, 10)
    slice2 = aten.slice(slice1, 1, 10, -10)
    slice2.copy_(src)

    """
    assert node.target in (_generalized_scatter, _inplace_generalized_scatter)
    inp, src, view_ops = node.args
    assert not node.kwargs

    # Only the functional form needs a defensive clone; the in-place form
    # is allowed to mutate inp directly.
    if node.target is _generalized_scatter:
        inp = graph_call_function(graph, aten.clone, inp)

    # Re-apply the recorded view chain, then copy src into the final view.
    tmp = inp
    for view in view_ops:  # type: ignore[union-attr]
        tmp = graph_call_function(graph, view.target, tmp, *view.args, **view.kwargs)  # type: ignore[union-attr]

    graph_call_function(graph, aten.copy_.default, tmp, src)
    return inp  # type: ignore[return-value]
|
| 149 |
+
|
| 150 |
+
# View ops whose view_scatter op is lowered into mutations anyway,
# so is never a pessimisation to decompose.
# (Their scatter counterparts — as_strided_scatter / diagonal_scatter —
# realize input and output regardless.)
_ALWAYS_MUTATING_SCATTER_OPS = {
    aten.as_strided.default,
    aten.diagonal.default,
}
|
| 157 |
+
|
| 158 |
+
def scatter_always_uses_mutation(node: torch.fx.Node) -> bool:
    """True if any view op in this _generalized_scatter node's chain belongs
    to _ALWAYS_MUTATING_SCATTER_OPS, i.e. its scatter form mutates anyway.
    """
    _, _, view_ops = node.args
    return any(view.target in _ALWAYS_MUTATING_SCATTER_OPS for view in view_ops)  # type: ignore[union-attr]
|
| 162 |
+
|
| 163 |
+
def should_reinplace_scatter(node: torch.fx.Node) -> bool:
    """Choose between mutating and functional scatter decompositions

    Reinplacing view scatter ops can be pessimising as it blocks fusion with the
    input or output tensor computations. However, it is still profitable if the
    input and output would have been realized anyway.

    Returns True when the mutating decomposition should be used.
    """
    inp, src, view_ops = node.args

    # Mutating scatter ops unconditionally realize input and output
    if scatter_always_uses_mutation(node):
        return True

    # Both tensors realized anyway: mutation costs nothing extra.
    if is_node_realized(inp) and is_node_realized(node):  # type: ignore[arg-type]
        return True

    # If the output is copied back into the input, this forces both to be
    # realized as the output is a user of the input
    if inp.op == "placeholder" and any(  # type: ignore[union-attr]
        user.target is aten.copy_.default and user.args[0] is inp for user in node.users
    ):
        return True

    # Otherwise, assume fusions will make functional variants profitable
    return False
|
| 190 |
+
|
| 191 |
+
def decompose_generalized_scatter(graph: torch.fx.Graph) -> None:
    """Replace _generalized_scatter with normal aten ops"""
    for node in graph.nodes:
        if node.target not in (_generalized_scatter, _inplace_generalized_scatter):
            continue

        # The in-place form (or a chain containing an always-mutating view)
        # must use the mutating decomposition; otherwise use view_scatter ops.
        use_mutation = (
            node.target is _inplace_generalized_scatter
            or scatter_always_uses_mutation(node)
        )

        with graph.inserting_before(node):
            if use_mutation:
                new_node = _decompose_scatter_mutating(graph, node)
            else:
                new_node = _decompose_scatter_functional(graph, node)

        node.replace_all_uses_with(new_node)
        graph.erase_node(node)
| 211 |
+
|
| 212 |
+
def canonicalize_view_scatter_ops(graph: torch.fx.Graph) -> None:
    """
    This canonicalizes view scatter ops into a generalized form, defined as:
      def scatter(inp, src, views):
        tmp = inp.clone()
        for view in views:
          tmp = view(tmp)
        tmp.copy_(src)

    We also fuse consecutive view scatter ops of the form
        a = scatter(view2(self), src, [view1])
        b = scatter(self, a, [view2])
    which can be rewritten as
        b = scatter(self, src, [view2, view1])
        a = view2(b)

    This is both more efficient as we only do a single scatter, and also
    easier to reinplace since there is only one use of `self`
    """

    # For each view node: the base tensor it ultimately views, and the chain
    # of ViewOps that produces it from that base.
    node_to_view_base: Dict[torch.fx.Node, torch.fx.Node] = {}
    node_to_view_op: Dict[torch.fx.Node, List[ViewOp]] = defaultdict(list)

    def handle_views(node: torch.fx.Node):
        # Record base + accumulated view chain for this view node.
        inp = node.args[0]
        node_to_view_base[node] = node_to_view_base.get(inp, inp)  # type: ignore[arg-type]
        node_to_view_op[node] = [
            *node_to_view_op[inp],  # type: ignore[index]
            ViewOp(
                node.target,  # type: ignore[arg-type]
                args=node.args[1:],
                kwargs=node.kwargs,
            ),
        ]

    def handle_view_scatter(node: torch.fx.Node):
        assert len(node.args) >= 2
        inp, src = node.args[:2]

        scatter_view_op = ViewOp(
            _SCATTER_OP_TO_VIEW[node.target],
            args=node.args[2:],
            kwargs=node.kwargs,
        )

        def can_fuse():
            # Fusable when src is itself a _generalized_scatter whose input
            # views the same base, one view level deeper than inp.
            if src.target is not _generalized_scatter:  # type: ignore[union-attr]
                return False
            src_inp, src_src, src_scatter_view_op = src.args  # type: ignore[union-attr]

            inp_base = node_to_view_base.get(inp, inp)  # type: ignore[arg-type]
            src_base = node_to_view_base.get(src_inp, src_inp)  # type: ignore[arg-type]
            return inp_base is src_base and node_to_view_op[src_inp] == [  # type: ignore[index]
                *node_to_view_op[inp],  # type: ignore[index]
                scatter_view_op,
            ]

        if not can_fuse():
            # Plain canonicalization: wrap this scatter in the generalized form.
            with graph.inserting_before(node):
                new_node = graph_call_function(
                    graph,
                    _generalized_scatter,
                    inp,
                    src,
                    [scatter_view_op],
                )
                node.replace_all_uses_with(new_node)
                graph.erase_node(node)
            return

        # Fuse the two scatters into one with a longer view chain.
        src_inp, src_src, src_scatter_view_op = src.args  # type: ignore[union-attr]
        with graph.inserting_before(src):
            new_node = graph_call_function(
                graph,
                _generalized_scatter,
                inp,
                src_src,
                [scatter_view_op, *src_scatter_view_op],  # type: ignore[misc]
            )
            node.replace_all_uses_with(new_node)
            graph.erase_node(node)

            # If the inner scatter result is still used, recreate it as a
            # view of the fused result.
            if src.users:  # type: ignore[union-attr]
                new_src = graph_call_function(
                    graph,
                    _SCATTER_OP_TO_VIEW[node.target],
                    new_node,
                    *node.args[2:],
                    **node.kwargs,
                )

                handle_views(new_src)
                src.replace_all_uses_with(new_src)  # type: ignore[union-attr]

            graph.erase_node(src)

    for node in graph.nodes:
        if _is_view_op(node.target):
            handle_views(node)
        elif node.target in _SCATTER_OP_TO_VIEW:
            handle_view_scatter(node)
| 314 |
+
|
| 315 |
+
# Functional ops with a known in-place variant, keyed by the out-of-place
# overload. The mutated argument is always argument 0 here.
inplaceable_ops = {
    aten.index_put.default: InplaceableOp(aten.index_put_.default, 0),
    aten._unsafe_index_put.default: InplaceableOp(inductor_prims._unsafe_index_put_, 0),
    _generalized_scatter: InplaceableOp(
        _inplace_generalized_scatter,
        0,
        extra_check=should_reinplace_scatter,
    ),
}

try:
    c10d_functional = torch.ops._c10d_functional
    inplaceable_collective_ops = {
        c10d_functional.all_reduce.default: InplaceableOp(
            c10d_functional.all_reduce_.default, 0
        ),
        c10d_functional.all_reduce_coalesced.default: InplaceableOp(
            c10d_functional.all_reduce_coalesced_.default, 0
        ),
    }
    inplaceable_ops.update(inplaceable_collective_ops)
except AttributeError:
    # _c10d_functional ops are only available when torch
    # is built with USE_DISTRIBUTED=1.
    pass

# Foreach ops inherit their in-place mapping from the lowering table.
inplaceable_foreach_ops: Dict[torch._ops.OpOverload, InplaceableOp] = {}
for outplace_op, inplace_op in inplaceable_foreach_ops_lowerings.items():
    inplaceable_foreach_ops[outplace_op] = InplaceableOp(inplace_op, 0)


inplaceable_triton_ops = {triton_kernel_wrapper_functional}


# Operators that don't depend on the tensor data
META_ONLY_OPS = {
    aten.sym_size.int,
    aten.sym_stride.int,
    aten.sym_numel.default,
    aten.sym_storage_offset.default,
}
|
| 357 |
+
|
| 358 |
+
def reinplace_inplaceable_ops_core(graph: torch.fx.Graph) -> None:
    """
    Reinplaces in-placeable operations.
    If there are no uses of a view of the mutated arg after the current node,
    it is possible to inplace the op.
    This above algorithm could be justified by observing side effects. While
    we traverse the graph in forwards direction, only latter nodes could view
    side effects of the current node. If the current node is not used later as
    well as no view of this node is used later in the graph, then it is safe to
    inplace as there would be no way to observe the side effects.
    This condition is slightly different for graph inputs where they can only
    be inplaced if the above condition is true and there's a copy_ in the
    epilogue that signals that the caller wants to observe the mutation.
    """

    # (graph input, producing node) -> the epilogue copy_ node that writes the
    # producing node's result back into that graph input.
    copy_args_to_copy_nodes = {}
    mutated_inputs = set()
    # storage id (from get_node_storage) -> all nodes aliasing that storage.
    storage_to_nodes = defaultdict(list)
    # Topological position of each node; computed once so later "is this use
    # after that node?" checks are O(1) dict lookups.
    node_order: Dict[Any, int] = {}
    for i, node in enumerate(reversed(graph.nodes)):
        node_order[node] = len(graph.nodes) - i - 1
        storage_to_nodes[get_node_storage(node)].append(node)
        if node.target == aten.copy_.default and node.args[0].op == "placeholder":
            dst = node.args[0]
            src = node.args[1]
            # If the target is a getitem and it indexes a possible clone,
            # then skip over it
            if src.target == operator.getitem and (
                (
                    src.args[0].target == triton_kernel_wrapper_functional
                    and src.args[0].kwargs["kwargs"][src.args[1]] == node.args[0]
                )
                or (src.args[0].target in inplaceable_foreach_ops)
                or (src.args[0].target == torch.ops.higher_order.auto_functionalized)
            ):
                src = src.args[0]

            copy_args_to_copy_nodes[(dst, src)] = node

            mutated_inputs.add(node.args[0])

    def any_use_of_views_after_node(node, shared_view_nodes, *, copy_node):
        # True iff some alias of the mutated storage is consumed strictly
        # after `node` (and, when copy_node is given, strictly before it).
        node_loc = node_order[node]
        copy_node_loc = node_order[copy_node] if copy_node is not None else None

        def is_meta_only_user(node):
            # Users that only read shape/stride metadata (or views whose
            # users all do) are unaffected by reinplacing.
            if _is_view_op(node.target):
                return all(is_meta_only_user(u) for u in node.users)
            return node.target in META_ONLY_OPS

        for view in shared_view_nodes:
            for user in view.users:
                user_loc = node_order[user]
                # Skip all users before node
                if user_loc <= node_loc:
                    continue
                # Ignore uses after the copy_ epilogue node, where the input
                # has already been mutated anyway
                if copy_node_loc is not None and copy_node_loc <= user_loc:
                    continue
                # Reinplacing does not change shape metadata
                if is_meta_only_user(user):
                    continue
                return True
        return False

    def can_inplace(node, mutated_arg):
        # Lists/tuples of mutated args must all be individually inplaceable.
        if isinstance(mutated_arg, (list, tuple)):
            return all(can_inplace(node, arg) for arg in mutated_arg)

        if get_node_storage(mutated_arg) is None:
            return False
        shared_view_nodes = storage_to_nodes[get_node_storage(mutated_arg)]
        if mutated_arg.op == "placeholder":
            # Graph inputs may only be mutated in place when the caller
            # opted in via an epilogue copy_ back into the input.
            if not (
                copy_node := copy_args_to_copy_nodes.get((mutated_arg, node), False)
            ):
                return False

            if any_use_of_views_after_node(
                node, shared_view_nodes, copy_node=copy_node
            ):
                return False

            return True
        elif any(view.op == "placeholder" for view in shared_view_nodes):
            # If mutated arg is view of any of the inputs of the graph,
            # do not allow for inplacing.
            # This would require more sophisticated algorithm to handle
            return False
        else:
            return not any_use_of_views_after_node(
                node, shared_view_nodes, copy_node=None
            )

    # Nodes to be replaced by another node and then erased, applied at the end
    # so we never mutate the graph while scanning it.
    replace_dict: Dict[torch.fx.Node, torch.fx.Node] = {}

    def reinplace_and_refine_tensors_to_clone(old_tensors_to_clone, kwargs):
        # NOTE: reads `node` from the enclosing for-loop below, not a parameter.
        # Returns the subset of kwarg names that still need defensive clones.
        tensors_to_clone: List[str] = []
        for arg in old_tensors_to_clone:
            assert arg in kwargs
            mutated_arg = kwargs[arg]
            if can_inplace(node, mutated_arg):
                copy_node = copy_args_to_copy_nodes.get((mutated_arg, node))
                if copy_node is not None:
                    replace_dict[copy_node] = copy_node.args[0]
                for user in node.users:
                    if user.target == operator.getitem and user.args[1] == arg:
                        replace_dict[user] = mutated_arg
            else:
                tensors_to_clone.append(arg)
        return tensors_to_clone

    for node in graph.nodes:
        if (inplaceable_op := inplaceable_ops.get(node.target, None)) is not None:
            mutated_arg = node.args[inplaceable_op.mutated_arg]
            if can_inplace(node, mutated_arg) and inplaceable_op.extra_check(node):
                # TODO(yifu): this doesn't properly remove copy epilogues for
                # ops that mutate multiple inputs. Need to revise the copy
                # node tracking logic to support the case.
                copy_node = copy_args_to_copy_nodes.get((mutated_arg, node))
                if copy_node is not None:
                    replace_dict[copy_node] = copy_node.args[0]
                node.target = inplaceable_op.inplace_op
        elif node.target == torch.ops.higher_order.auto_functionalized:
            _mutable_op = node.args[0]
            from torch._higher_order_ops.auto_functionalize import get_mutable_arg_names

            tensors_to_clone = get_mutable_arg_names(_mutable_op)
            # Don't try to reinplace Optional[Tensor] args that are None.
            tensors_to_clone = [
                t for t in tensors_to_clone if node.kwargs[t] is not None
            ]
            tensors_to_clone = reinplace_and_refine_tensors_to_clone(
                tensors_to_clone, node.kwargs
            )

            # Stash the metadata. There is a pass later on where we decompose
            # auto_functionalized into clones + a mutable op; this metadata
            # tells the decomp to only clone the following inputs
            node.meta["only_clone_these_tensors"] = tensors_to_clone
        elif node.target in inplaceable_triton_ops:
            # inplaceable_triton_ops take an additional argument called
            # tensors_to_clone which contain a list of tensors to clone
            # This pass iterates over them and sees which ones are safe
            # to eliminate (i.e. no longer need the clones)
            tensors_to_clone = reinplace_and_refine_tensors_to_clone(
                node.kwargs["tensors_to_clone"], node.kwargs["kwargs"]
            )

            kwargs = dict(node.kwargs)
            kwargs["tensors_to_clone"] = tensors_to_clone
            node.kwargs = immutable_dict(kwargs)
        elif (
            inplaceable_op := inplaceable_foreach_ops.get(node.target, None)
        ) is not None:
            mutated_args = node.args[inplaceable_op.mutated_arg]

            # Foreach reinplacing requires every mutated arg to have an
            # epilogue copy_ tracked above.
            if not all((arg, node) in copy_args_to_copy_nodes for arg in mutated_args):
                continue

            if can_inplace(node, mutated_args):
                for arg in mutated_args:
                    copy_node = copy_args_to_copy_nodes[(arg, node)]
                    replace_dict[copy_node] = copy_node.args[0]

                node.target = inplaceable_op.inplace_op
    for node, replacement in replace_dict.items():
        # Follow replacement chains to their final target (path compression).
        while replacement in replace_dict:
            replacement = replace_dict[replacement]
        replace_dict[node] = replacement

        node.replace_all_uses_with(replacement)
        graph.erase_node(node)
|
| 532 |
+
|
| 533 |
+
|
| 534 |
+
def reinplace_inplaceable_ops(graph: torch.fx.Graph) -> None:
    """Run the full reinplacing pipeline on *graph*.

    Canonicalizes view-scatter ops, performs the core reinplacing pass, and
    finally decomposes any generalized scatters that were not reinplaced.
    """
    for graph_pass in (
        canonicalize_view_scatter_ops,
        reinplace_inplaceable_ops_core,
        decompose_generalized_scatter,
    ):
        graph_pass(graph)
|
vila/lib/python3.10/site-packages/torch/_inductor/fx_utils.py
ADDED
|
@@ -0,0 +1,220 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import operator
|
| 2 |
+
from collections import defaultdict
|
| 3 |
+
from typing import Any, Callable, DefaultDict, Dict, Optional, Tuple, Type
|
| 4 |
+
|
| 5 |
+
import torch
|
| 6 |
+
import torch.fx
|
| 7 |
+
from torch.fx.experimental.symbolic_shapes import statically_known_true, sym_eq
|
| 8 |
+
from torch.utils import _pytree as pytree
|
| 9 |
+
from torch.utils._pytree import tree_map
|
| 10 |
+
from .virtualized import V
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
# Check the pattern: (nn.module, F.function/torch.Tensor.method) matched.
|
| 14 |
+
# Works for length 2 patterns with 1 module and 1 function/method.
|
| 15 |
+
def matches_module_function_pattern(
    pattern: Tuple[Type[torch.nn.modules.Module], Callable[..., Any]],
    node: torch.fx.node.Node,
    modules: Dict[str, torch.nn.modules.Module],
) -> bool:
    """Return True if *node* completes a (nn.Module, function/method) pair.

    *node* must be a call_function/call_method matching ``pattern[1]`` whose
    first argument is a call_module node invoking a module of exactly type
    ``pattern[0]`` (subclasses do not match), and the module's output must be
    consumed only by *node*.
    """
    if len(node.args) == 0:
        return False
    producer = node.args[0]
    if not (isinstance(producer, torch.fx.Node) and isinstance(node, torch.fx.Node)):
        return False
    # The producer must be a call_module on a registered module of the exact
    # pattern type.
    if producer.op != "call_module":
        return False
    if not isinstance(producer.target, str):
        return False
    if producer.target not in modules:
        return False
    if type(modules[producer.target]) is not pattern[0]:
        return False
    # The consumer must be the function/method from the pattern.
    if node.op != "call_function" and node.op != "call_method":
        return False
    if node.target != pattern[1]:
        return False
    # The module output may feed only the current node.
    return len(producer.users) <= 1
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
class FakeTensorUpdater:
    """
    The main idea here is that it's difficult to maintain accurate fake
    tensors (our primary form of metadata) for each node in our graph as we
    transform it.

    The most reliable way to obtain this information is by rerunning
    faketensor propagation. However, in general, faketensor propagation is
    fairly expensive. So, instead we'd like to only rerun faketensor
    propagation on nodes that have changed.

    In order to detect which nodes have changed, we first hash its node,
    target, and argument lists (which are immutable in FX).

    Then, whenever we call incremental_update, we check which FX nodes have a
    new hash, and recompute the faketensor metadata for that node. Then, we
    continue to recursively compute the faketensors for all users until the
    fake tensors stop changing.
    """

    def __init__(self, graph: torch.fx.Graph):
        # Hashes of nodes whose fake-tensor metadata is considered current.
        self.processed_hashes = set()
        self.graph = graph

        for node in self.graph.nodes:
            self.processed_hashes.add(self.hash_node(node))

    def hash_node(self, node: torch.fx.Node):
        """Cheap change-detection key: FX replaces (rather than mutates) the
        args/kwargs tuples when a node is edited, so id() shifts on change."""
        # todo(chilli): Not a great hash function
        return (node, node.target, id(node.args), id(node.kwargs))

    def incremental_update(self):
        """Recompute meta["val"] for changed aten nodes and propagate the
        update through their users until the fake tensors stop changing."""
        processed = set()
        existing_storages: DefaultDict[Optional[int], int] = defaultdict(int)
        for node in self.graph.nodes:
            existing_storages[get_node_storage(node)] += 1

        def is_intlist_same(new, old):
            # Symbolic-shape-aware elementwise equality.
            return statically_known_true(sym_eq(new, old))

        def is_fake_tensor_same(new, old):
            if type(new) != type(old):
                return False
            if isinstance(new, (list, tuple)):
                if len(new) != len(old):
                    return False
                return all(
                    is_fake_tensor_same(new_i, old_i) for new_i, old_i in zip(new, old)
                )
            assert isinstance(new, torch.Tensor)
            if not is_intlist_same(new.shape, old.shape) or new.layout != old.layout:
                return False
            if new.layout == torch.strided and (
                not is_intlist_same(new.stride(), old.stride())
                or not statically_known_true(
                    new.storage_offset() == old.storage_offset()
                )
            ):
                return False

            if get_storage(new) == get_storage(old):
                return True

            # This is the case where it returns a completely fresh storage that's used nowhere else.
            if (
                existing_storages[get_storage(old)] == 1
                and get_storage(new) not in existing_storages
            ):
                return True
            return False

        for node in self.graph.nodes:
            if self.hash_node(node) in self.processed_hashes:
                continue

            def is_aten_node(node):
                return node.op == "call_function" and isinstance(
                    node.target, torch._ops.OpOverload
                )

            if not is_aten_node(node):
                continue

            processing = [node]
            while len(processing) > 0:
                updating_node = processing.pop()
                if updating_node in processed:
                    continue
                # BUG FIX: this previously read `if is_aten_node(updating_node):
                # continue`, which skipped every aten node -- including the
                # changed node that seeded `processing` -- so no metadata was
                # ever recomputed. Only NON-aten nodes should be skipped, which
                # also matches the `not is_aten_node(node)` guard above.
                if not is_aten_node(updating_node):
                    continue

                is_valid, args, kwargs = get_fake_args_kwargs(updating_node)
                if not is_valid:
                    continue
                with V.fake_mode:
                    new_fake_tensor = updating_node.target(*args, **kwargs)
                if "val" in updating_node.meta and is_fake_tensor_same(
                    new_fake_tensor, updating_node.meta["val"]
                ):
                    continue
                updating_node.meta["val"] = new_fake_tensor

                # todo(chilli): This code path is not exercised by our existing
                # tests - add a test
                # NOTE(review): get_node_storage expects an fx.Node but receives
                # a FakeTensor here; flagged untested upstream -- confirm before
                # relying on this path.
                existing_storages[get_node_storage(new_fake_tensor)] += 1
                processed.add(updating_node)
                processing.extend(updating_node.users)

                self.processed_hashes.add(self.hash_node(updating_node))
|
| 155 |
+
|
| 156 |
+
|
| 157 |
+
def get_storage(t: torch.Tensor) -> int:
    """Return an integer identifying *t*'s underlying untyped storage."""
    storage = t.untyped_storage()
    return storage._cdata
|
| 159 |
+
|
| 160 |
+
|
| 161 |
+
def get_node_storage(node: torch.fx.Node) -> Optional[int]:
    """Return the storage id of the node's fake tensor, or None if the node
    has no tensor metadata or the tensor has no storage."""
    val = node.meta.get("val")
    if not isinstance(val, torch.Tensor):
        return None
    if not torch._C._has_storage(val):
        return None
    return get_storage(val)
|
| 169 |
+
|
| 170 |
+
|
| 171 |
+
def get_fake(x):
    """Map an fx.Node to its fake-tensor metadata ("val"); anything else --
    including nodes that carry no metadata -- passes through unchanged."""
    if not isinstance(x, torch.fx.Node):
        return x
    return x.meta.get("val", x)
|
| 177 |
+
|
| 178 |
+
|
| 179 |
+
def get_fake_args_kwargs(x: torch.fx.Node) -> Tuple[bool, Tuple[Any], Dict[str, Any]]:
|
| 180 |
+
"""
|
| 181 |
+
First value returns a boolean if any of the input nodes don't have a faketensor.
|
| 182 |
+
"""
|
| 183 |
+
args, kwargs = tree_map(get_fake, (x.args, x.kwargs))
|
| 184 |
+
if any(
|
| 185 |
+
isinstance(a, torch.fx.Node) for a in pytree.arg_tree_leaves(*args, **kwargs)
|
| 186 |
+
):
|
| 187 |
+
return False, args, kwargs
|
| 188 |
+
return True, args, kwargs
|
| 189 |
+
|
| 190 |
+
|
| 191 |
+
def is_node_realized(node: torch.fx.Node) -> bool:
    """Returns true if a node is always realized when lowered to inductor IR.

    NOTE: This may return some false negatives. e.g. it doesn't
    handle buffers realized heuristically during lowering, or
    buffers realized indirectly through view ops.
    """
    from torch._inductor.lowering import fallbacks, needs_realized_inputs

    def _is_buffer(n: torch.fx.Node) -> bool:
        if n.op == "call_function" and n.target is operator.getitem:
            # getitem projects one output of a multi-output op; whether it is
            # a buffer is decided by the producing (possibly fallback) op.
            return _is_buffer(n.args[0])  # type: ignore[arg-type]
        return n.op in ("placeholder", "output") or n.target in fallbacks

    def _realizes_inputs(n: torch.fx.Node) -> bool:
        return n.op == "output" or n.target in needs_realized_inputs

    if _is_buffer(node):
        return True
    if any(_realizes_inputs(user) for user in node.users):
        return True
    # Otherwise, assume node isn't realized
    return False
|
vila/lib/python3.10/site-packages/torch/_inductor/graph.py
ADDED
|
@@ -0,0 +1,1324 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import itertools
|
| 2 |
+
import logging
|
| 3 |
+
import operator
|
| 4 |
+
import os
|
| 5 |
+
import re
|
| 6 |
+
import sys
|
| 7 |
+
import time
|
| 8 |
+
from collections import defaultdict
|
| 9 |
+
from contextlib import contextmanager
|
| 10 |
+
from typing import Any, Callable, DefaultDict, Dict, List, Optional, Set, Tuple
|
| 11 |
+
|
| 12 |
+
import sympy
|
| 13 |
+
|
| 14 |
+
import torch
|
| 15 |
+
import torch._logging
|
| 16 |
+
import torch.fx
|
| 17 |
+
from torch._decomp import get_decompositions
|
| 18 |
+
from torch._dynamo.utils import defake, dynamo_timed
|
| 19 |
+
from torch._logging import LazyString, trace_structured
|
| 20 |
+
from torch._subclasses.fake_tensor import FakeTensor
|
| 21 |
+
from torch.fx.experimental._backward_state import BackwardState
|
| 22 |
+
from torch.fx.experimental.sym_node import magic_methods, method_to_operator
|
| 23 |
+
from torch.fx.experimental.symbolic_shapes import has_free_symbols, ShapeEnv, SymTypes
|
| 24 |
+
from torch.utils._mode_utils import no_dispatch
|
| 25 |
+
|
| 26 |
+
from . import config, ir
|
| 27 |
+
from .codegen.common import (
|
| 28 |
+
DeviceOpOverrides,
|
| 29 |
+
get_device_op_overrides,
|
| 30 |
+
get_scheduling_for_device,
|
| 31 |
+
get_wrapper_codegen_for_device,
|
| 32 |
+
register_backend_for_device,
|
| 33 |
+
)
|
| 34 |
+
from .codegen.cpp_wrapper_cpu import CppWrapperCpu
|
| 35 |
+
from .codegen.cpp_wrapper_cuda import CppWrapperCuda
|
| 36 |
+
from .codegen.wrapper import WrapperCodeGen
|
| 37 |
+
from .exc import (
|
| 38 |
+
CppWrapperCodeGenError,
|
| 39 |
+
LoweringException,
|
| 40 |
+
MissingOperatorWithDecomp,
|
| 41 |
+
MissingOperatorWithoutDecomp,
|
| 42 |
+
)
|
| 43 |
+
from .ir import (
|
| 44 |
+
Constant,
|
| 45 |
+
FixedLayout,
|
| 46 |
+
InputBuffer,
|
| 47 |
+
Pointwise,
|
| 48 |
+
Reduction,
|
| 49 |
+
StorageBox,
|
| 50 |
+
TensorBox,
|
| 51 |
+
)
|
| 52 |
+
from .lowering import (
|
| 53 |
+
constrain_to_fx_strides,
|
| 54 |
+
FALLBACK_ALLOW_LIST,
|
| 55 |
+
fallback_handler,
|
| 56 |
+
fallback_node_due_to_unsupported_type,
|
| 57 |
+
layout_constraints,
|
| 58 |
+
lowerings,
|
| 59 |
+
make_fallback,
|
| 60 |
+
needs_realized_inputs,
|
| 61 |
+
unsupported_output_tensor,
|
| 62 |
+
)
|
| 63 |
+
from .sizevars import SizeVarAllocator
|
| 64 |
+
from .utils import convert_shape_to_inductor, gather_origins, get_sympy_Expr_dtype
|
| 65 |
+
from .virtualized import V
|
| 66 |
+
|
| 67 |
+
log = logging.getLogger(__name__)
# Artifact loggers: these only emit when the corresponding TORCH_LOGS
# artifact is enabled (e.g. TORCH_LOGS="perf_hints,output_code").
perf_hint_log = torch._logging.getArtifactLogger(__name__, "perf_hints")
output_code_log = torch._logging.getArtifactLogger(__name__, "output_code")


# Meta-internal builds log generated module code; open-source builds get a
# no-op stand-in so call sites do not have to branch.
if config.is_fbcode():
    from torch._inductor.fb.utils import log_module_code
else:

    def log_module_code(*args, **kwargs):
        # No-op outside fbcode builds.
        pass
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
def supported_dtype_of_cpp_wrapper(dtype, cuda):
|
| 81 |
+
supported_dtype = {
|
| 82 |
+
torch.float32,
|
| 83 |
+
torch.float64,
|
| 84 |
+
torch.int64,
|
| 85 |
+
torch.int32,
|
| 86 |
+
torch.int16,
|
| 87 |
+
torch.int8,
|
| 88 |
+
torch.uint8,
|
| 89 |
+
torch.bool,
|
| 90 |
+
torch.bfloat16,
|
| 91 |
+
torch.complex32,
|
| 92 |
+
torch.complex64,
|
| 93 |
+
torch.complex128,
|
| 94 |
+
torch.float16,
|
| 95 |
+
}
|
| 96 |
+
if cuda:
|
| 97 |
+
supported_dtype.add(torch.float8_e4m3fn)
|
| 98 |
+
supported_dtype.add(torch.float8_e5m2)
|
| 99 |
+
supported_dtype.add(torch.float8_e4m3fnuz)
|
| 100 |
+
supported_dtype.add(torch.float8_e5m2fnuz)
|
| 101 |
+
|
| 102 |
+
return dtype in supported_dtype
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
def may_get_constant_buffer_dtype(constant_buffer):
    """Infer a torch dtype for a sympy constant buffer, or None if unknown.

    NOTE(review): because sympy.Symbol is a subclass of sympy.Expr, the
    trailing is_integer/is_float branches appear unreachable -- confirm
    against the sympy class hierarchy before relying on them.
    """
    assert isinstance(
        constant_buffer, (sympy.Symbol, sympy.Expr, sympy.core.numbers.Integer)
    ), "get_constant_buffer_dtype only supports input of sympy.Symbol, sympy.Expr or sympy.core.numbers.Integer"
    if isinstance(constant_buffer, sympy.core.numbers.Integer):
        return torch.int64

    if isinstance(constant_buffer, sympy.Expr):
        return get_sympy_Expr_dtype(constant_buffer)

    if constant_buffer.is_integer:
        return torch.int64
    if constant_buffer.is_float:
        return torch.float32
    return None
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
def is_magic_method(op):
|
| 124 |
+
magic_ops = {method_to_operator(m) for m in magic_methods}
|
| 125 |
+
return op in magic_ops
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
def getattr_recursive(obj, target):
    """Resolve a dotted attribute path (e.g. ``"layer.weight"``) on *obj*.

    Raises:
        RuntimeError: if any intermediate attribute is missing; the message
            names the longest prefix that did resolve.
    """
    resolved_parts = []
    current = obj
    for part in target.split("."):
        if not hasattr(current, part):
            raise RuntimeError(
                f"Node referenced nonexistent target {'.'.join(resolved_parts)}"
            )
        resolved_parts.append(part)
        current = getattr(current, part)
    return current
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
class GraphLowering(torch.fx.Interpreter):
|
| 141 |
+
graph_outputs: List[ir.IRNode]
|
| 142 |
+
|
| 143 |
+
    def symbolic_sizes_strides(self, ex: torch.Tensor):
        """
        Support dynamic shapes and dynamic strides by assigning variables
        to each dimension. We duck-shape tensors, so if two tensors
        have the same size they get assigned the same symbolic variable.

        Returns a ``(sizes, strides)`` pair whose elements are sympy
        expressions (or plain ints for static dimensions).
        """
        if self.reuse_shape_env:
            # An outer shape env already assigned symbols to this tensor;
            # just translate them into inductor's sympy representation.
            return convert_shape_to_inductor(ex.size()), convert_shape_to_inductor(
                ex.stride()
            )
        else:
            from torch._dynamo.source import ConstantSource

            # TODO: this should not be needed once #93059 lands
            # https://github.com/pytorch/pytorch/pull/94031#discussion_r1096044816
            # TODO: make a dedicated UnknownSource for this?
            # NB: This is using the legacy default behavior from
            # create_symbolic_sizes_strides_storage_offset but we hope we can
            # just delete this entirely
            source = ConstantSource(
                f"__inductor_unknown_tensor_{len(self._shape_env.var_to_val)}"
            )
            (
                size,
                stride,
                _,  # storage offset is not needed here
            ) = self._shape_env.create_symbolic_sizes_strides_storage_offset(
                ex,
                source,
            )

        # Unwrap SymInt objects down to their raw sympy expressions; static
        # dimensions come back as plain ints and pass through unchanged.
        size = [i.node.expr if isinstance(i, torch.SymInt) else i for i in size]
        stride = [i.node.expr if isinstance(i, torch.SymInt) else i for i in stride]
        return size, stride
|
| 177 |
+
|
| 178 |
+
def static_sizes_strides(self, ex: torch.Tensor):
|
| 179 |
+
"""
|
| 180 |
+
Primarily used to weights
|
| 181 |
+
"""
|
| 182 |
+
size = [sympy.Integer(i) for i in ex.size()]
|
| 183 |
+
stride = [sympy.Integer(i) for i in ex.stride()]
|
| 184 |
+
return size, stride
|
| 185 |
+
|
| 186 |
+
    def init_backend_registration(self):
        # Register the default codegen backends, but only when no scheduling
        # class has been registered for the device yet — an earlier (e.g.
        # out-of-tree) registration is left untouched.
        if get_scheduling_for_device("cpu") is None:
            from .codegen.cpp import CppScheduling

            register_backend_for_device("cpu", CppScheduling, WrapperCodeGen)

        if get_scheduling_for_device("cuda") is None:
            from .codegen.cuda_combined_scheduling import CUDACombinedScheduling

            # CUDACombinedScheduling combines Triton and CUDA C++ scheduling for CUDA devices via delegation
            register_backend_for_device("cuda", CUDACombinedScheduling, WrapperCodeGen)
|
| 197 |
+
|
| 198 |
+
def __init__(
|
| 199 |
+
self,
|
| 200 |
+
gm: torch.fx.GraphModule,
|
| 201 |
+
example_inputs: Optional[List[torch.Tensor]] = None,
|
| 202 |
+
shape_env=None,
|
| 203 |
+
num_static_inputs=None,
|
| 204 |
+
graph_id=None,
|
| 205 |
+
cpp_wrapper=False,
|
| 206 |
+
aot_mode=False,
|
| 207 |
+
user_visible_outputs=frozenset(),
|
| 208 |
+
layout_opt=None,
|
| 209 |
+
extern_node_serializer=None,
|
| 210 |
+
is_inference=False,
|
| 211 |
+
is_const_graph=False,
|
| 212 |
+
const_output_index=None,
|
| 213 |
+
const_code=None,
|
| 214 |
+
const_module=None,
|
| 215 |
+
name=None,
|
| 216 |
+
):
|
| 217 |
+
super().__init__(gm)
|
| 218 |
+
|
| 219 |
+
self.example_inputs = example_inputs
|
| 220 |
+
self.layout_opt = (
|
| 221 |
+
layout_opt
|
| 222 |
+
if layout_opt is not None
|
| 223 |
+
else self.decide_layout_opt(gm, is_inference=is_inference)
|
| 224 |
+
)
|
| 225 |
+
self.num_channels_last_conv = 0
|
| 226 |
+
self.is_inference = is_inference
|
| 227 |
+
self.is_const_graph = is_const_graph
|
| 228 |
+
self.const_code = const_code
|
| 229 |
+
self.const_module = const_module
|
| 230 |
+
|
| 231 |
+
self.extra_traceback = False # we do our own error wrapping
|
| 232 |
+
if shape_env is None:
|
| 233 |
+
shape_env = ShapeEnv()
|
| 234 |
+
self.reuse_shape_env = False
|
| 235 |
+
else:
|
| 236 |
+
self._shape_env = shape_env
|
| 237 |
+
self.reuse_shape_env = True
|
| 238 |
+
self._shape_env = shape_env
|
| 239 |
+
self.sizevars = SizeVarAllocator(shape_env)
|
| 240 |
+
self.graph_input_names: List[str] = []
|
| 241 |
+
self.graph_inputs: Dict[str, TensorBox] = {}
|
| 242 |
+
self.graph_inputs_original: Dict[str, InputBuffer] = {}
|
| 243 |
+
self.device_types: Set[str] = (
|
| 244 |
+
const_module.device_types if const_module else set()
|
| 245 |
+
)
|
| 246 |
+
self.device_idxs: Set[int] = const_module.device_idxs if const_module else set()
|
| 247 |
+
self.cuda = False
|
| 248 |
+
self.buffers: List[ir.Buffer] = []
|
| 249 |
+
self.const_output_index: Dict[str, int] = (
|
| 250 |
+
const_output_index if const_output_index else {}
|
| 251 |
+
)
|
| 252 |
+
self.folded_constants: Set[str] = (
|
| 253 |
+
set(const_output_index.keys()) if const_output_index else set()
|
| 254 |
+
)
|
| 255 |
+
self.constants: Dict[str, torch.Tensor] = (
|
| 256 |
+
const_module.constants if const_module else {}
|
| 257 |
+
)
|
| 258 |
+
self.constant_reprs: Dict[str, str] = {}
|
| 259 |
+
self.removed_buffers: Set[str] = set()
|
| 260 |
+
self.removed_inplace_buffers: Set[str] = set()
|
| 261 |
+
self.mutated_buffers: Set[str] = set()
|
| 262 |
+
self.never_reuse_buffers: Set[str] = set()
|
| 263 |
+
self.inplaced_to_remove: Set[str] = set()
|
| 264 |
+
self.device_ops: DeviceOpOverrides = None # type: ignore[assignment]
|
| 265 |
+
self.wrapper_code: WrapperCodeGen = None # type: ignore[assignment]
|
| 266 |
+
# See `ProxyExecutor Design Note` in ir.py for more details
|
| 267 |
+
self.extern_kernel_nodes: List[ir.ExternKernelNode] = []
|
| 268 |
+
self.extern_node_serializer: Optional[
|
| 269 |
+
Callable[[List[ir.ExternKernelNode]], Any]
|
| 270 |
+
] = extern_node_serializer
|
| 271 |
+
self.current_node: torch.fx.Node = None # type: ignore[assignment]
|
| 272 |
+
self.num_static_inputs = num_static_inputs
|
| 273 |
+
self.lists: Dict[str, List[str]] = {}
|
| 274 |
+
self.mutated_inputs: Set[str] = set()
|
| 275 |
+
self.mutated_input_idxs: List[int] = []
|
| 276 |
+
self.name_to_buffer: Dict[str, ir.Buffer] = {}
|
| 277 |
+
self.name_to_users: DefaultDict[str, List[ir.IRNode]] = defaultdict(list)
|
| 278 |
+
self.creation_time = time.time()
|
| 279 |
+
self.name = name
|
| 280 |
+
self.cpp_wrapper = cpp_wrapper
|
| 281 |
+
|
| 282 |
+
# record multi_kernel choice for cpp_wrapper so the second pass knows
|
| 283 |
+
# which sub-kernel is picked. Copy cpp_wrapper to another variable
|
| 284 |
+
# since cpp_wrapper flag is set to false for the first pass of codegen.
|
| 285 |
+
self.record_multi_kernel_choice = cpp_wrapper
|
| 286 |
+
self.multi_kernel_to_choice: Dict[str, int] = {}
|
| 287 |
+
|
| 288 |
+
self.aot_mode = aot_mode
|
| 289 |
+
self.graph_id = graph_id
|
| 290 |
+
self.scheduler: "torch._inductor.scheduler.Scheduler" = None # type: ignore[assignment]
|
| 291 |
+
self.nodes_prefer_channels_last = (
|
| 292 |
+
self.find_nodes_prefer_channels_last() if self.layout_opt else set()
|
| 293 |
+
)
|
| 294 |
+
self._warned_fallback = {"aten.convolution_backward"}
|
| 295 |
+
self.user_visible_outputs = user_visible_outputs
|
| 296 |
+
self.cache_key: str = "" # This is the cache key for the compiled artifact
|
| 297 |
+
self.cache_path: str = "" # This is the path in the filesystem where the compiled artifact is stored
|
| 298 |
+
self.cache_linemap: List[
|
| 299 |
+
Tuple[int, str]
|
| 300 |
+
] = (
|
| 301 |
+
[]
|
| 302 |
+
) # This is the linemap used by the profiler to mark custom compiled kernels getting run
|
| 303 |
+
# Used if lowering encounters cases where cudagraphs are not supported
|
| 304 |
+
self.disable_cudagraphs_reason: Optional[str] = None
|
| 305 |
+
|
| 306 |
+
# only keeping one node per device for stack trace purposes
|
| 307 |
+
self.device_node_mapping: Dict[torch.device, torch.fx.Node] = {}
|
| 308 |
+
self.orig_gm: torch.fx.GraphModule = gm.__copy__()
|
| 309 |
+
self.dynamo_flat_name_to_original_fqn = self.module.meta.get(
|
| 310 |
+
"dynamo_flat_name_to_original_fqn", {}
|
| 311 |
+
)
|
| 312 |
+
self.allocated_constant_name = (
|
| 313 |
+
const_module.allocated_constant_name if const_module is not None else {}
|
| 314 |
+
)
|
| 315 |
+
self.init_backend_registration()
|
| 316 |
+
|
| 317 |
+
    @staticmethod
    def decide_layout_opt(gm, *, is_inference) -> bool:
        """
        Decide if we should enable layout optimization for this graph based on
        heuristics.

        The heuristics look only at aten.convolution nodes: their count,
        device, shapes (via node meta "val"), and — in inference — their
        estimated FLOP mix.
        """
        if not config.layout_optimization:
            return False

        if config.force_layout_optimization:
            return True

        conv_nodes = [
            n for n in gm.graph.nodes if n.target == torch.ops.aten.convolution.default
        ]
        nconv = len(conv_nodes)

        if nconv == 0:
            return False

        # For cpu backend and mkldnn enabled, we always use channels_last for better performance.
        if (
            torch.backends.mkldnn.enabled
            and torch.backends.mkldnn.is_available()
            and all(
                # args[0] is the conv input, args[1] the weight.
                n.args[idx].meta["val"].device == torch.device("cpu")
                for n in conv_nodes
                for idx in [0, 1]
            )
        ):
            return True

        # Skip graphs where convs are a tiny fraction of all nodes.
        # Following models are skipped due to this:
        # jx_nest_base
        # volo_d1_224
        if len(list(gm.graph.nodes)) >= 300 * nconv:
            log.debug("Skipped layout opt because only a few conv")
            return False

        if any(
            has_free_symbols(n.args[idx].meta["val"])
            for n in conv_nodes
            for idx in [0, 1]
        ):
            log.debug(
                "See perf regression with dynamic shape. Follow up in https://github.com/pytorch/pytorch/issues/102670"
            )
            return False

        # Classification helpers; all read the weight's fake value
        # (n.args[1].meta["val"]) and, for is_grouped, the groups argument
        # (last positional arg of aten.convolution).
        def is_grouped(n):
            return n.args[-1] > 1 and n.args[1].meta["val"].size(1) > 1

        def is_in_out_channel(n):
            return (
                n.args[1].meta["val"].size(0) * 2 <= n.args[1].meta["val"].size(1)
                and n.args[1].meta["val"].size(2) > 1
            )

        def is_small_channel(n):
            return (
                n.args[1].meta["val"].size(0) <= 64
                and n.args[1].meta["val"].size(1) <= 64
            )

        # only grouped convolutions benchmarked as slower in conv samples for inference only
        if is_inference:
            from torch.utils.flop_counter import FlopCounterMode

            flop_counts: Dict[str, float] = defaultdict(float)
            for node in conv_nodes:
                success, args, kwargs = torch._inductor.fx_utils.get_fake_args_kwargs(
                    node
                )

                if success:
                    with FlopCounterMode(display=False) as flop_counter_mode:
                        with V.fake_mode:
                            node.target(*args, **kwargs)

                    counted_flops = flop_counter_mode.get_total_flops()
                    # Each conv contributes to exactly one bucket; grouped
                    # takes precedence, then small, then in_out.
                    if is_grouped(node):
                        node_type = "grouped"
                    elif is_small_channel(node):
                        node_type = "small"
                    elif is_in_out_channel(node):
                        node_type = "in_out"
                    else:
                        node_type = "default"

                    flop_counts[node_type] += counted_flops
                else:
                    log.debug("Conv inputs meta not found")

            # average benchmarked channels last speedup / slowdown, < 1 is speedup.
            # taken from the set of convolution inputs in benchmarks/dynamo/microbenchmarks/operator_inp_logs/torchbench_train/
            # To regenerate these numbers follow https://gist.github.com/eellison/55d7a6ed6f39829d68ac56f95f4df5bb
            GROUPED_MULTIPLIER = 1.358
            DEFAULT_MULTIPLIER = 0.823
            IN_OUT_MULTIPLIER = 0.725
            SMALL_MULTIPLIER = 0.783

            total_flops = sum(flop_counts.values())
            # TODO - get different values per hardware
            weighted_flops = (
                flop_counts["grouped"] * GROUPED_MULTIPLIER
                + flop_counts["small"] * SMALL_MULTIPLIER
                + flop_counts["in_out"] * IN_OUT_MULTIPLIER
                + flop_counts["default"] * DEFAULT_MULTIPLIER
            )
            # Enable layout opt when the weighted (channels-last) cost is not
            # worse than the unweighted baseline.
            do_layout_opt = weighted_flops <= total_flops
            if not do_layout_opt:
                log.debug(
                    "Skipped layout opt in inference because weighted flops indicate slowdown, default: %d, channels last: %d",
                    total_flops,
                    weighted_flops,
                )
            return do_layout_opt

        # Channels last layout can dramatically hurt grouped conv perf. E.g.
        # Conv with arguments like
        #   {"input_shape": [32, 224, 112, 112], "weight_shape": [224, 112, 3, 3],
        #    "stride": [2, 2], "padding": [1, 1], "groups": 2}
        # slows down 31x using channels last..

        # But a lot of timm models use depthwise separable convolution which will
        # result in grouped convolution with in-channel size == 1.
        # For those grouped convolution, channels last still helps a lot.
        # E.g.
        # Conv with arguments
        #   {"input_shape": [128, 58, 56, 56], "weight_shape": [58, 1, 3, 3],
        #    "stride": [2, 2], "padding": [1, 1], "groups": 58}
        # get 1.86x speedup with channels last layout.
        #
        # The following heuristics skip using channels-last if the model contains
        # grouped convolution with in-channels > 1.
        if any(map(is_grouped, conv_nodes)):
            log.debug(
                "Skip layout opt because found grouped convolution with >1 in_channels!"
            )
            return False

        # For some models that contain convolution with larger in-channel than out-channel, applying
        # channels last hurts performance.
        # Following models are skipped due to this:
        # - pytorch_unet
        # - phlippe_densenet (slightly worse)
        # - Background_Matting (1.22x -> 0.821x)
        # - pytorch_CycleGAN_and_pix2pix (1.597x -> 1.294x)
        if any(map(is_in_out_channel, conv_nodes)):
            log.debug(
                "Skip layout opt because some convolutions have smaller out_channel"
            )
            return False

        # Following models are skipped due to this:
        # - functorch_maml_omniglot
        if all(map(is_small_channel, conv_nodes)):
            log.debug("Skip layout opt because all convolution channels are too small")
            return False

        return True
|
| 478 |
+
|
| 479 |
+
def qualify_name(self, name: str) -> str:
|
| 480 |
+
"""Prepend the given name with the graph name if any."""
|
| 481 |
+
if self.name is not None:
|
| 482 |
+
return f"{self.name}_{name}"
|
| 483 |
+
return name
|
| 484 |
+
|
| 485 |
+
    def make_subgraph(
        self,
        gm: torch.fx.GraphModule,
        example_inputs: List[torch.Tensor],
        subgraph_name: str,
    ) -> "GraphLowering":
        """
        Make a subgraph of the current graph with all inherited
        parts, except the graph module (`gm`) and `example_inputs`.
        The subgraphs are lowered separately, but intended to be
        inlined in the parent graph's codegening. Hence the need
        for maintaining the same `shape_env` and other properties.
        The subgraph name is qualified by the parent graph's name.
        """
        # Note: only shape_env, cpp_wrapper, aot_mode, the extern-node
        # serializer, and is_inference are forwarded; everything else uses
        # GraphLowering's defaults.
        return GraphLowering(
            gm=gm,
            example_inputs=example_inputs,
            shape_env=self._shape_env,
            cpp_wrapper=self.cpp_wrapper,
            aot_mode=self.aot_mode,
            extern_node_serializer=self.extern_node_serializer,
            is_inference=self.is_inference,
            name=self.qualify_name(subgraph_name),
        )
|
| 509 |
+
|
| 510 |
+
    def find_nodes_prefer_channels_last(self):
        """
        The rule to decide if an node prefer channels last is simple.
        1. if it's input/output of a convolution
        2. if one of its user prefers channels last

        We have rule 1 because cudnn runs a faster convolution kernel for channels last inputs;
        Rule 2 is also important. It makes sure that indirect inputs to convolution also prefers
        channels last.

        Consider the scenario: conv -> batch-norm -> relu -> conv
        Without rule 2, batch-norm output may use a contiguous layout. That will cause 2 extra copies:
        1. the output of batch-norm should be channels last initially since its input is a conv's output.
           Forcing the batch-norm's output to be contiguous results in the first copy
        2. The second conv's input is initially contiguous. This layout is propagated from the batch-norm's output.
           We need convert it to channels last layout which results in the second copy.
        With rule 2, we makes sure all the tensors in the chain uses channels last layout. So both copies
        can be saved.
        """
        output_set = set()
        # Walk the graph backwards so that a node sees its users' membership
        # before being visited itself (rule 2 propagates in one pass).
        for n in reversed(self.module.graph.nodes):
            if n.target == torch.ops.aten.convolution.default:
                output_set.add(n)
                continue

            for user in n.users:
                if user in output_set:
                    output_set.add(n)
                    break

        # need a second pass to add downstream nodes of those channel last nodes to the sets.
        # This pass is especially needed to avoid mix-layout kernel inputs in backward pass.
        #
        # Let's say a conv-batchnorm 's output is passed to relu whose output is in turn returned
        # from the fwd graph. Without this second pass, we will force relu's output to be contiguous.
        # Then in the kernel in backward pass, the contiguous output of relu may be mix with other channels last
        # tensors and passed to a kernel.
        #
        # This pass improve yolov3 training speedup from 1.116x (worse than disabling layout optimization speedup 1.196x) to 1.457x.
        # It also improves dla102 training speedup from 1.240x (worse than disabling layout optimization speedup 1.523x) to 1.835x .
        # This also helps the following models:
        # - res2net101_26w_4s
        # - res2net50_14w_8s
        # - sebotnet33ts_256
        for n in self.module.graph.nodes:
            if n in output_set:
                for child in n.users:
                    output_set.add(child)

        return output_set
|
| 560 |
+
|
| 561 |
+
def warn_fallback(self, name):
|
| 562 |
+
if name not in self._warned_fallback:
|
| 563 |
+
self._warned_fallback.add(name)
|
| 564 |
+
perf_hint_log.info("Using FallbackKernel: %s", name)
|
| 565 |
+
|
| 566 |
+
def add_device_info(self, device: torch.device):
|
| 567 |
+
self.device_types.add(device.type)
|
| 568 |
+
if device.index is not None:
|
| 569 |
+
self.device_idxs.add(device.index)
|
| 570 |
+
if V.graph.current_node and device not in self.device_node_mapping:
|
| 571 |
+
self.device_node_mapping[device] = V.graph.current_node
|
| 572 |
+
|
| 573 |
+
    @property
    def fake_mode(self):
        # The fake-tensor mode lives on the virtualized global state V, not on
        # the graph instance itself.
        return V.fake_mode
|
| 576 |
+
|
| 577 |
+
def get_buffer(self, buffer_name: str):
|
| 578 |
+
if buffer_name in self.name_to_buffer:
|
| 579 |
+
return self.name_to_buffer[buffer_name]
|
| 580 |
+
if buffer_name in self.graph_inputs:
|
| 581 |
+
return self.graph_inputs[buffer_name]
|
| 582 |
+
return None
|
| 583 |
+
|
| 584 |
+
def get_dtype(self, buffer_name: str):
|
| 585 |
+
if buffer_name in self.constants:
|
| 586 |
+
return self.constants[buffer_name].dtype
|
| 587 |
+
if buffer_name in self.name_to_buffer:
|
| 588 |
+
return self.name_to_buffer[buffer_name].get_dtype()
|
| 589 |
+
if buffer_name in self.graph_inputs:
|
| 590 |
+
return self.graph_inputs[buffer_name].get_dtype()
|
| 591 |
+
m = re.match(r"(as_strided|reinterpret_tensor)\(([a-zA-Z0-9_]+),", buffer_name)
|
| 592 |
+
if m:
|
| 593 |
+
return self.get_dtype(m.group(1))
|
| 594 |
+
raise KeyError(f"could not find {buffer_name}")
|
| 595 |
+
|
| 596 |
+
def get_numel(self, buffer_name: str):
|
| 597 |
+
from .ir import MultiOutputLayout
|
| 598 |
+
|
| 599 |
+
if buffer_name in self.constants:
|
| 600 |
+
return self.constants[buffer_name].numel()
|
| 601 |
+
if buffer_name in self.name_to_buffer:
|
| 602 |
+
buf = self.name_to_buffer[buffer_name]
|
| 603 |
+
if isinstance(getattr(buf, "layout", None), MultiOutputLayout):
|
| 604 |
+
return 1
|
| 605 |
+
return buf.get_numel()
|
| 606 |
+
if buffer_name in self.graph_inputs:
|
| 607 |
+
return self.graph_inputs[buffer_name].get_numel()
|
| 608 |
+
raise KeyError(f"could not find {buffer_name}")
|
| 609 |
+
|
| 610 |
+
    @dynamo_timed
    def run(self, *args):
        # Thin wrapper over Interpreter.run; the dynamo_timed decorator records
        # how long graph lowering takes.
        return super().run(*args)
|
| 613 |
+
|
| 614 |
+
def register_buffer(self, buffer: ir.Buffer):
|
| 615 |
+
name = self.qualify_name(f"buf{len(self.buffers)}")
|
| 616 |
+
self.buffers.append(buffer)
|
| 617 |
+
self.name_to_buffer[name] = buffer
|
| 618 |
+
# Skip empty CPU tensor so that CUDA graphs can succeed, see https://github.com/pytorch/pytorch/pull/114144
|
| 619 |
+
if not isinstance(buffer, ir.ComputedBuffer) or not buffer.is_zero_elements():
|
| 620 |
+
self.add_device_info(buffer.get_device())
|
| 621 |
+
return name
|
| 622 |
+
|
| 623 |
+
def register_list(self, buffer_names: List[str]):
|
| 624 |
+
name = self.qualify_name("list_" + "_".join(buffer_names))
|
| 625 |
+
self.lists[name] = buffer_names
|
| 626 |
+
return name
|
| 627 |
+
|
| 628 |
+
    def register_users_of(self, node_output):
        # For every buffer name read by node_output (recursing through
        # lists/tuples), record the reading IR node in self.name_to_users.
        # mark_buffer_mutated later realizes these readers before a mutation.
        def register(value):
            if isinstance(value, (list, tuple)):
                for x in value:
                    register(x)
            if isinstance(value, ir.IRNode):
                # Only track fully wrapped nodes (value.data and
                # value.data.data are both IRNodes) — presumably the
                # TensorBox -> StorageBox -> node chain; partially wrapped
                # values are skipped.  TODO(review): confirm intent.
                if (
                    not hasattr(value, "data")
                    or not isinstance(value.data, ir.IRNode)
                    or not (
                        hasattr(value.data, "data")
                        and isinstance(value.data.data, ir.IRNode)
                    )
                ):
                    return

                for read_name in value.get_read_names():
                    self.name_to_users[read_name].append(value)

        register(node_output)
|
| 648 |
+
|
| 649 |
+
def mark_buffer_mutated(self, name: str):
|
| 650 |
+
"""
|
| 651 |
+
When a buffer is mutated we need to make sure all the reads to
|
| 652 |
+
the old version are realized before the mutation happens.
|
| 653 |
+
"""
|
| 654 |
+
assert isinstance(name, str)
|
| 655 |
+
self.mutated_buffers.add(name)
|
| 656 |
+
|
| 657 |
+
if name not in self.name_to_users:
|
| 658 |
+
return
|
| 659 |
+
|
| 660 |
+
for user in self.name_to_users[name]:
|
| 661 |
+
user.realize()
|
| 662 |
+
|
| 663 |
+
    def add_tensor_constant(self, data, name=None):
        """Register *data* as a graph constant and return a TensorBox over a
        ConstantBuffer referencing it.  Identical constants are deduplicated
        (unless runtime constant folding is enabled for AOT inductor)."""

        def allocate(name):
            # Dedup: reuse an existing constant that matches in size, stride,
            # dtype, device, and contents.  Skipped under runtime constant
            # folding, and never attempted for mkldnn tensors.
            if not config.aot_inductor.use_runtime_constant_folding:
                for constant_name, value in self.constants.items():
                    if (
                        not data.is_mkldnn
                        and data.size() == value.size()
                        and data.stride() == value.stride()
                        and data.dtype == value.dtype
                        and data.device == value.device
                        and torch.eq(data, value).all()
                    ):
                        return constant_name

            if name is None:
                name = f"constant{len(self.constants)}"
            if name[0].isdigit():
                name = f"constant_{name}"
            name = self.qualify_name(name)
            # We may generate a var name for each constant in the codegen.
            # Let's only keep sane characters.
            prefix = re.sub(r"[^a-zA-Z0-9_]", "_", name)
            name = prefix
            cnt = 0
            # Append a counter until the sanitized name is unique.
            while name in self.constants:
                name = f"{prefix}_{cnt}"
                cnt += 1
            self.constants[name] = data
            self.constant_reprs[name] = (
                f"{data.device!r} {data.dtype!r} "
                f"{tuple(data.size())!r} {tuple(data.stride())!r} "
                f"{hash(data):x}"
            )
            return name

        new_name = allocate(name)
        # Remember the original (pre-sanitization/dedup) name for this slot.
        self.allocated_constant_name[new_name] = name

        return TensorBox.create(
            ir.ConstantBuffer(
                new_name,
                FixedLayout(data.device, data.dtype, *self.static_sizes_strides(data)),
            )
        )
|
| 707 |
+
|
| 708 |
+
def constant_name(self, name: str, device_override: Optional[torch.device]):
|
| 709 |
+
"""
|
| 710 |
+
We AOT copy constants to the devices they are needed on.
|
| 711 |
+
If device_override doesn't match the constant's device, then
|
| 712 |
+
copy it and return a different name.
|
| 713 |
+
"""
|
| 714 |
+
if self.constants[name].device == device_override or device_override is None:
|
| 715 |
+
return name
|
| 716 |
+
alt_name = f"{name}_{device_override.type}{device_override.index or 0}"
|
| 717 |
+
if alt_name not in self.constants:
|
| 718 |
+
self.constants[alt_name] = self.constants[name].to(device_override)
|
| 719 |
+
return alt_name
|
| 720 |
+
|
| 721 |
+
    def placeholder(self, target: str, args, kwargs):
        """Lower an FX placeholder node: record it as a graph input and return
        either a sympy expression (symbolic/scalar inputs), None (BackwardState),
        or a TensorBox over an InputBuffer (tensor inputs)."""
        example = super().placeholder(target, args, kwargs)
        self.graph_input_names.append(target)
        if isinstance(example, SymTypes):
            # Symbolic scalar input: the graph input is its sympy expression.
            expr = example.node.expr
            self.graph_inputs[target] = expr
            return expr
        elif isinstance(example, (int, bool, float)):
            # Concrete scalar input: wrap it as a sympy constant.
            expr = sympy.sympify(example)
            self.graph_inputs[target] = expr
            return expr
        if isinstance(example, BackwardState):
            # Ignored arg, must be unused
            # Alternately we could filter this out in AotAutograd
            return None
        assert isinstance(example, torch.Tensor), example
        # todo(chilli): We can remove the last check once we turn buffers into
        # static shape tensors. That's a hack to workaround Inductor believing
        # the buffer should be static but us passing in a fake tensor with
        # symbolic shapes.
        if not example._has_symbolic_sizes_strides:
            # the first N inputs are weights
            sizes, strides = self.static_sizes_strides(example)
        else:
            sizes, strides = self.symbolic_sizes_strides(example)
        # TODO(jansel): handle input aliasing
        target = self.qualify_name(target)
        tensor = TensorBox.create(
            InputBuffer(
                target,
                FixedLayout(example.device, example.dtype, sizes, strides),
            )
        )
        self.graph_inputs[target] = tensor
        # Keep the raw InputBuffer too: output() compares against it to detect
        # mutated inputs.
        self.graph_inputs_original[target] = tensor.data.data
        self.add_device_info(example.device)
        return tensor
|
| 758 |
+
|
| 759 |
+
    def call_function(self, target, args, kwargs):
        """Lower an FX call_function node via the lowerings table, registering
        a fallback kernel on demand for ops without a lowering."""
        # getitem on a python container is plain indexing, not an op to lower.
        if target is operator.getitem and isinstance(args[0], (list, tuple, dict)):
            return super().call_function(target, args, kwargs)

        if hasattr(target, "_inductor_lowering_function"):
            # passthrough lowerings from .pattern_matcher
            return target(*args, **kwargs)

        def get_custom_op_layout_constraints(target, args, kwargs):
            # Custom operations that require preserving stride order
            # which run through implicit fallback must constrain their
            # arguments' fx strides
            layout_constraint = None
            if torch._C.Tag.needs_fixed_stride_order in target.tags:
                # We have to set the current args because call_function will immediately
                # evaluate this lowering after creating the fallback, without evaluating
                # the layout constraint
                args, kwargs = constrain_to_fx_strides(
                    self.current_node, *args, **kwargs
                )
                # Also register the layout constraint so when the fallback
                # is used again, we can constrain the args to the same layout
                layout_constraint = constrain_to_fx_strides
            return layout_constraint, args, kwargs

        if target not in lowerings:
            # No registered lowering: either create a fallback kernel or raise
            # a MissingOperator error that tells the user how to proceed.
            assert isinstance(
                target, torch._ops.OpOverload
            ), f"{target} is not an OpOverload"
            base_name = target.name().split(".")[0]
            if base_name in FALLBACK_ALLOW_LIST:
                make_fallback(target)
            elif config.implicit_fallbacks:
                layout_constraint, args, kwargs = get_custom_op_layout_constraints(
                    target, args, kwargs
                )
                error = (
                    MissingOperatorWithDecomp
                    if get_decompositions([target])
                    else MissingOperatorWithoutDecomp
                )
                log.info(
                    "Creating implicit fallback for:\n%s",
                    error.operator_str(target, args, kwargs),
                )
                make_fallback(target, layout_constraint)

            elif get_decompositions([target]):
                # There isn't a good way to dynamically patch this in
                # since AOT Autograd already ran. The error message tells
                # the user how to fix it.
                raise MissingOperatorWithDecomp(target, args, kwargs)
            else:
                raise MissingOperatorWithoutDecomp(target, args, kwargs)

        try:
            log.debug(" via %s", lowerings[target])
            out = lowerings[target](*args, **kwargs)
            return out
        except Exception as e:
            # Wrap any lowering failure, preserving the original traceback.
            raise LoweringException(e, target, args, kwargs).with_traceback(
                e.__traceback__
            ) from None
|
| 822 |
+
|
| 823 |
+
@staticmethod
|
| 824 |
+
def can_inline_constant(t: torch.Tensor) -> bool:
|
| 825 |
+
"""
|
| 826 |
+
True if this is a small constant attr that will be inlined.
|
| 827 |
+
"""
|
| 828 |
+
return len(t.shape) == 1 and t.shape[0] <= 8
|
| 829 |
+
|
| 830 |
+
    def get_attr(self, target, args, kwargs):
        """Lower an FX get_attr node: subgraphs become ir.Subgraph, scalars
        become Constant, small 1-D tensors are inlined, everything else is
        registered as a tensor constant."""
        # this is a constant
        value = getattr_recursive(self.module, target)

        if isinstance(value, torch.fx.GraphModule):
            return ir.Subgraph(name=target, graph_module=value)

        # Keep the value as a named constant when configured to, or when it
        # cannot be represented as an output tensor.
        if (
            config.aot_inductor.use_runtime_constant_folding
            or config.always_keep_tensor_constants
            or unsupported_output_tensor(value)
        ):
            return self.add_tensor_constant(value, target)

        with no_dispatch():
            # 0-d tensor: inline as a scalar Constant.
            if value.shape == ():
                return Constant(value.item(), value.dtype, value.device)
            if self.can_inline_constant(value):
                # tensor lowering has constant inlining logic
                from .lowering import tensor

                return tensor(value.tolist(), dtype=value.dtype, device=value.device)

        return self.add_tensor_constant(value, target)
|
| 854 |
+
|
| 855 |
+
    def call_module(self, target, args, kwargs):
        # call_module nodes are not expected at this stage — presumably they
        # were flattened before lowering; confirm upstream.
        raise AssertionError()
|
| 857 |
+
|
| 858 |
+
    def call_method(self, target, args, kwargs):
        # call_method nodes are not expected at this stage — presumably they
        # were eliminated before lowering; confirm upstream.
        raise AssertionError()
|
| 860 |
+
|
| 861 |
+
def output(self, target, args, kwargs):
|
| 862 |
+
result = super().output(target, args, kwargs)
|
| 863 |
+
assert isinstance(result, (tuple, list)), type(result)
|
| 864 |
+
assert all(
|
| 865 |
+
isinstance(
|
| 866 |
+
x,
|
| 867 |
+
(
|
| 868 |
+
TensorBox,
|
| 869 |
+
ir.Constant,
|
| 870 |
+
type(None),
|
| 871 |
+
ir.ConstantBuffer,
|
| 872 |
+
sympy.Expr,
|
| 873 |
+
sympy.logic.boolalg.Boolean,
|
| 874 |
+
int,
|
| 875 |
+
),
|
| 876 |
+
)
|
| 877 |
+
for x in result
|
| 878 |
+
), result
|
| 879 |
+
self.graph_outputs = [ir.ExternKernel.realize_input(x) for x in result]
|
| 880 |
+
value: ir.IRNode
|
| 881 |
+
for name, value in self.graph_inputs.items():
|
| 882 |
+
assert isinstance(
|
| 883 |
+
value, (TensorBox, sympy.Expr)
|
| 884 |
+
), f"Unsupported inductor graph input type: {type(value)}"
|
| 885 |
+
if not isinstance(value, TensorBox):
|
| 886 |
+
continue
|
| 887 |
+
value.realize()
|
| 888 |
+
assert isinstance(value, TensorBox)
|
| 889 |
+
value = value.data
|
| 890 |
+
assert isinstance(value, ir.StorageBox)
|
| 891 |
+
value_storage_box = value
|
| 892 |
+
value = value.data
|
| 893 |
+
if not isinstance(value, InputBuffer) or value.get_name() != name:
|
| 894 |
+
# one of our inputs was mutated, need to turn that into a copy
|
| 895 |
+
ir.MutationLayout.realize_into(value, self.graph_inputs_original[name])
|
| 896 |
+
# replace output with mutated input
|
| 897 |
+
try:
|
| 898 |
+
ind = self.graph_outputs.index(value_storage_box)
|
| 899 |
+
self.graph_outputs[ind] = self.graph_inputs_original[name]
|
| 900 |
+
except ValueError:
|
| 901 |
+
pass
|
| 902 |
+
|
| 903 |
+
self.finalize()
|
| 904 |
+
log.debug(
|
| 905 |
+
"Force channels last inputs for %d conv for the current graph with id %d",
|
| 906 |
+
self.num_channels_last_conv,
|
| 907 |
+
self.graph_id if self.graph_id is not None else -1,
|
| 908 |
+
)
|
| 909 |
+
|
| 910 |
+
def finalize(self):
|
| 911 |
+
for buf in self.buffers:
|
| 912 |
+
buf.decide_layout()
|
| 913 |
+
|
| 914 |
+
@contextmanager
|
| 915 |
+
def set_current_node(self, node: torch.fx.Node):
|
| 916 |
+
old = self.current_node
|
| 917 |
+
try:
|
| 918 |
+
self.current_node = node
|
| 919 |
+
yield
|
| 920 |
+
finally:
|
| 921 |
+
self.current_node = old
|
| 922 |
+
|
| 923 |
+
def run_node(self, n: torch.fx.Node):
|
| 924 |
+
def debug(msg):
|
| 925 |
+
log.debug("lowering %s %s", LazyString(n.format_node), msg)
|
| 926 |
+
|
| 927 |
+
origins = {n}
|
| 928 |
+
if n.op == "call_function":
|
| 929 |
+
args, kwargs = self.fetch_args_kwargs_from_env(n)
|
| 930 |
+
origins |= gather_origins(args, kwargs)
|
| 931 |
+
with ir.IRNode.current_origins(origins), self.set_current_node(
|
| 932 |
+
n
|
| 933 |
+
), V.set_current_node(n):
|
| 934 |
+
if (
|
| 935 |
+
n.op == "call_function"
|
| 936 |
+
and n.target is not operator.getitem
|
| 937 |
+
and fallback_node_due_to_unsupported_type(n)
|
| 938 |
+
):
|
| 939 |
+
debug("fallback_handler")
|
| 940 |
+
result = fallback_handler(n.target, add_to_fallback_set=False)(
|
| 941 |
+
*args, **kwargs # type: ignore[possibly-undefined]
|
| 942 |
+
)
|
| 943 |
+
elif n.op == "call_function" and n.target in layout_constraints:
|
| 944 |
+
debug("layout_constraints")
|
| 945 |
+
args, kwargs = layout_constraints[n.target](n, *args, **kwargs) # type: ignore[index]
|
| 946 |
+
result = self.call_function(n.target, args, kwargs)
|
| 947 |
+
elif is_magic_method(n.target):
|
| 948 |
+
# TODO: this is sus, it probably should be handled in the
|
| 949 |
+
# lowerings themselves similarly to sym_size/sym-stride
|
| 950 |
+
debug("is_magic_method")
|
| 951 |
+
if isinstance(n.meta["val"], torch.SymInt):
|
| 952 |
+
result = n.meta["val"].node.expr
|
| 953 |
+
else:
|
| 954 |
+
result = super().run_node(n)
|
| 955 |
+
else:
|
| 956 |
+
debug("")
|
| 957 |
+
result = super().run_node(n)
|
| 958 |
+
|
| 959 |
+
# require the same stride order for dense outputs,
|
| 960 |
+
# 1. user-land view() will not throw because inductor
|
| 961 |
+
# output different strides than eager
|
| 962 |
+
# long term the solution is to make view() always succeed
|
| 963 |
+
# with infallible strides.
|
| 964 |
+
# 2: as_strided ops, we need make sure its input has same size/stride with
|
| 965 |
+
# eager model to align with eager behavior.
|
| 966 |
+
as_strided_ops = [
|
| 967 |
+
torch.ops.aten.as_strided.default,
|
| 968 |
+
torch.ops.aten.as_strided_.default,
|
| 969 |
+
torch.ops.aten.as_strided_scatter.default,
|
| 970 |
+
]
|
| 971 |
+
is_output = any(user.op == "output" for user in n.users)
|
| 972 |
+
is_input_for_as_strided = any(
|
| 973 |
+
user.target in as_strided_ops for user in n.users
|
| 974 |
+
)
|
| 975 |
+
if (
|
| 976 |
+
is_output
|
| 977 |
+
and isinstance(result, TensorBox)
|
| 978 |
+
and isinstance(result.data, ir.BaseView)
|
| 979 |
+
):
|
| 980 |
+
# Realize so that outputs are correctly aliased
|
| 981 |
+
result.realize()
|
| 982 |
+
|
| 983 |
+
if (is_output or is_input_for_as_strided) and isinstance(
|
| 984 |
+
n.meta["val"], torch.Tensor
|
| 985 |
+
):
|
| 986 |
+
strides = n.meta["val"].stride()
|
| 987 |
+
dense = torch._prims_common.is_non_overlapping_and_dense(n.meta["val"])
|
| 988 |
+
# requiring a stride order for a non-dense output wouldn't
|
| 989 |
+
# recreate the same strides, and would fail with view, defer for now.
|
| 990 |
+
if dense and len(strides):
|
| 991 |
+
stride_order = ir.get_stride_order(strides)
|
| 992 |
+
if (
|
| 993 |
+
len(result.get_size()) == 4
|
| 994 |
+
and n in self.nodes_prefer_channels_last
|
| 995 |
+
and n.name not in self.user_visible_outputs
|
| 996 |
+
and not is_input_for_as_strided
|
| 997 |
+
):
|
| 998 |
+
stride_order = ir.NHWC_STRIDE_ORDER
|
| 999 |
+
result = ir.ExternKernel.require_stride_order(result, stride_order)
|
| 1000 |
+
|
| 1001 |
+
# Realize if (1) any user need inputs realized, or (2) there is
|
| 1002 |
+
# already too many reads and rematerializing can be bad.
|
| 1003 |
+
num_users = len(set(n.users))
|
| 1004 |
+
if num_users > 1 and isinstance(result, TensorBox):
|
| 1005 |
+
for user in n.users:
|
| 1006 |
+
if user.target in needs_realized_inputs:
|
| 1007 |
+
result.realize_hint()
|
| 1008 |
+
# This inclusion is somewhat controversial (from
|
| 1009 |
+
# discussion between Horace, Natalia, and Elias).
|
| 1010 |
+
# Currently, it's not very clear why this is helpful.
|
| 1011 |
+
# The general idea here is that even though a node may
|
| 1012 |
+
# have FlexibleLayout, we still often *treat* it as if
|
| 1013 |
+
# it was contiguous. This appears to sometimes result in
|
| 1014 |
+
# suboptimal behavior.
|
| 1015 |
+
#
|
| 1016 |
+
# When we do a better job selecting layout, we should
|
| 1017 |
+
# revisit this.
|
| 1018 |
+
need_fixed_layout = [
|
| 1019 |
+
torch.ops.aten.convolution_backward.default,
|
| 1020 |
+
torch.ops.aten.mm.default,
|
| 1021 |
+
torch.ops.aten._int_mm.default,
|
| 1022 |
+
]
|
| 1023 |
+
if not self.layout_opt:
|
| 1024 |
+
need_fixed_layout.append(torch.ops.aten.convolution.default)
|
| 1025 |
+
if torch._C._has_mkldnn:
|
| 1026 |
+
need_fixed_layout += [
|
| 1027 |
+
torch.ops.mkldnn._convolution_pointwise.default,
|
| 1028 |
+
torch.ops.mkldnn._convolution_pointwise.binary,
|
| 1029 |
+
torch.ops.mkldnn._convolution_pointwise_.binary,
|
| 1030 |
+
torch.ops.mkldnn._convolution_transpose_pointwise.default,
|
| 1031 |
+
torch.ops.mkldnn._linear_pointwise.default,
|
| 1032 |
+
torch.ops.mkldnn._linear_pointwise.binary,
|
| 1033 |
+
torch.ops.aten.mkldnn_rnn_layer.default,
|
| 1034 |
+
torch.ops.onednn.qconv2d_pointwise.default,
|
| 1035 |
+
torch.ops.onednn.qconv2d_pointwise.binary,
|
| 1036 |
+
torch.ops.onednn.qlinear_pointwise.default,
|
| 1037 |
+
torch.ops.onednn.qlinear_pointwise.tensor,
|
| 1038 |
+
]
|
| 1039 |
+
if torch._C.has_mkl:
|
| 1040 |
+
need_fixed_layout += [torch.ops.mkl._mkl_linear.default]
|
| 1041 |
+
if user.target in need_fixed_layout:
|
| 1042 |
+
result = ir.ExternKernel.require_stride_order(
|
| 1043 |
+
result, ir.get_stride_order(n.meta["val"].stride())
|
| 1044 |
+
)
|
| 1045 |
+
if user.op == "output":
|
| 1046 |
+
if isinstance(result.data.data, (Pointwise, Reduction)):
|
| 1047 |
+
result.realize()
|
| 1048 |
+
|
| 1049 |
+
# TODO(jansel): introduce a store vs inline choice
|
| 1050 |
+
result.mark_reuse(len(n.users))
|
| 1051 |
+
|
| 1052 |
+
# Realize if the IRNode already has accumulated lots of reads
|
| 1053 |
+
if isinstance(result, TensorBox) and result.has_exceeded_max_reads():
|
| 1054 |
+
# Prevent excessive accumulation in a computed buffer, when
|
| 1055 |
+
# there are multiple branches each with small number of memory
|
| 1056 |
+
# reads, but they converge to a user.
|
| 1057 |
+
result.realize_hint()
|
| 1058 |
+
|
| 1059 |
+
# Realize if a Pointwise has too much stuff to be inlined.
|
| 1060 |
+
# As this may cause RecursionError during Inductor's evaluation.
|
| 1061 |
+
if isinstance(result, TensorBox) and isinstance(result.data, StorageBox):
|
| 1062 |
+
curr = result.data.data
|
| 1063 |
+
if isinstance(curr, Pointwise):
|
| 1064 |
+
# Use inner fn as a rough proxy. Good enough.
|
| 1065 |
+
if curr.has_large_inner_fn():
|
| 1066 |
+
result.realize()
|
| 1067 |
+
|
| 1068 |
+
# This is not complete, but it doesn't have to be: origin_node
|
| 1069 |
+
# tracking is best effort. The logic here critically relies on direct
|
| 1070 |
+
# TensorBox -> StorageBox denoting a non-view; we don't bother trying
|
| 1071 |
+
# to get views to work. Feel free to add any extra cases as needed.
|
| 1072 |
+
#
|
| 1073 |
+
# Note: we can't YOLO tree_map over this result, because if there are
|
| 1074 |
+
# buffers or a view involved, we might not be able to validly assign
|
| 1075 |
+
# the origin_node here.
|
| 1076 |
+
if isinstance(result, TensorBox) and isinstance(result.data, ir.StorageBox):
|
| 1077 |
+
if isinstance(result.data.data, ir.Loops):
|
| 1078 |
+
result.data.data.origin_node = n
|
| 1079 |
+
elif isinstance(result.data.data, ir.Buffer):
|
| 1080 |
+
result.data.data.origin_node = n
|
| 1081 |
+
if isinstance(result.data.data, ir.ComputedBuffer) and isinstance(
|
| 1082 |
+
result.data.data.data, ir.Loops
|
| 1083 |
+
):
|
| 1084 |
+
result.data.data.data.origin_node = n
|
| 1085 |
+
# Not really multi-output, can straightforwardly recurse in
|
| 1086 |
+
elif (
|
| 1087 |
+
isinstance(result.data.data, ir.MultiOutput)
|
| 1088 |
+
and not result.data.data.indices
|
| 1089 |
+
):
|
| 1090 |
+
if isinstance(result.data.data.inputs[0], ir.Buffer):
|
| 1091 |
+
result.data.data.inputs[0].origin_node = n
|
| 1092 |
+
|
| 1093 |
+
self.register_users_of(result)
|
| 1094 |
+
|
| 1095 |
+
return result
|
| 1096 |
+
|
| 1097 |
+
def validate_can_generate_cpp_wrapper(self):
|
| 1098 |
+
if config.disable_cpp_codegen:
|
| 1099 |
+
raise CppWrapperCodeGenError("C++ codegen is disabled")
|
| 1100 |
+
|
| 1101 |
+
if sys.platform not in ["linux", "darwin"]:
|
| 1102 |
+
raise CppWrapperCodeGenError(f"Unsupported platform {sys.platform}")
|
| 1103 |
+
|
| 1104 |
+
for value in self.graph_inputs.values():
|
| 1105 |
+
dtype = None
|
| 1106 |
+
if isinstance(value, TensorBox):
|
| 1107 |
+
dtype = value.get_dtype()
|
| 1108 |
+
elif isinstance(
|
| 1109 |
+
value, (sympy.Symbol, sympy.Expr, sympy.core.numbers.Integer)
|
| 1110 |
+
):
|
| 1111 |
+
dtype = may_get_constant_buffer_dtype(value)
|
| 1112 |
+
|
| 1113 |
+
if not supported_dtype_of_cpp_wrapper(dtype, self.cuda):
|
| 1114 |
+
raise CppWrapperCodeGenError(f"Unsupported input dtype {dtype}")
|
| 1115 |
+
|
| 1116 |
+
def init_wrapper_code(self):
|
| 1117 |
+
self.cuda = "cuda" in self.device_types
|
| 1118 |
+
if self.cpp_wrapper:
|
| 1119 |
+
self.validate_can_generate_cpp_wrapper()
|
| 1120 |
+
self.wrapper_code = CppWrapperCuda() if self.cuda else CppWrapperCpu()
|
| 1121 |
+
else:
|
| 1122 |
+
device_types = self.device_types.copy()
|
| 1123 |
+
device_types.discard("cpu")
|
| 1124 |
+
# TODO(Eikan): Only support mixing cpu and other device now.
|
| 1125 |
+
assert len(device_types) <= 1, "Does not support mixing {}".format(
|
| 1126 |
+
"+".join(device_types)
|
| 1127 |
+
)
|
| 1128 |
+
only_cpu = len(device_types) == 0
|
| 1129 |
+
device_type = "cpu" if only_cpu else device_types.pop()
|
| 1130 |
+
|
| 1131 |
+
self.device_ops = get_device_op_overrides(device_type)
|
| 1132 |
+
wrapper_code_gen_cls = get_wrapper_codegen_for_device(device_type)
|
| 1133 |
+
assert (
|
| 1134 |
+
wrapper_code_gen_cls is not None
|
| 1135 |
+
), f"Device {device_type} not supported"
|
| 1136 |
+
self.wrapper_code = wrapper_code_gen_cls()
|
| 1137 |
+
|
| 1138 |
+
if self.const_module:
|
| 1139 |
+
# If we have const module, we could reuse the kernels
|
| 1140 |
+
# This could avoid duplication and save time on doing recompilation (if Triton.)
|
| 1141 |
+
self.wrapper_code._names_iter = self.const_module.wrapper_code._names_iter
|
| 1142 |
+
self.wrapper_code.src_to_kernel = (
|
| 1143 |
+
self.const_module.wrapper_code.src_to_kernel
|
| 1144 |
+
)
|
| 1145 |
+
|
| 1146 |
+
def codegen_with_cpp_wrapper(self):
|
| 1147 |
+
"""
|
| 1148 |
+
For CPU, the cpp wrapper codegen is done in one pass.
|
| 1149 |
+
For GPU, the cpp wrapper codegen is done in two steps: JIT-compile the model with python
|
| 1150 |
+
wrapper code and run it to generate autotuned kernel binaries in the first pass; and then
|
| 1151 |
+
generate cpp wrapper code and compile it to a dynamic library in the second pass.
|
| 1152 |
+
"""
|
| 1153 |
+
if "cuda" in self.device_types:
|
| 1154 |
+
# first pass
|
| 1155 |
+
self.cpp_wrapper = False
|
| 1156 |
+
compiled = self.compile_to_module().call
|
| 1157 |
+
|
| 1158 |
+
def materialize(x):
|
| 1159 |
+
if isinstance(x, (torch.SymInt, torch.SymFloat)):
|
| 1160 |
+
# Need concrete value to run dynamic shapes and tune the result
|
| 1161 |
+
return x.node.hint
|
| 1162 |
+
elif isinstance(x, FakeTensor):
|
| 1163 |
+
return defake(x)
|
| 1164 |
+
else:
|
| 1165 |
+
assert isinstance(
|
| 1166 |
+
x, torch.Tensor
|
| 1167 |
+
), "Unknown type when creating real inputs" + str(type(x))
|
| 1168 |
+
return x
|
| 1169 |
+
|
| 1170 |
+
if tracing_context := torch._guards.TracingContext.try_get():
|
| 1171 |
+
if tracing_context.output_strides:
|
| 1172 |
+
tracing_context.output_strides.clear()
|
| 1173 |
+
|
| 1174 |
+
params_flat = [
|
| 1175 |
+
param
|
| 1176 |
+
for param in tracing_context.params_flat # type: ignore[union-attr]
|
| 1177 |
+
if param is not None
|
| 1178 |
+
]
|
| 1179 |
+
real_inputs = [
|
| 1180 |
+
materialize(x) for x in itertools.chain(params_flat, V.real_inputs)
|
| 1181 |
+
]
|
| 1182 |
+
else:
|
| 1183 |
+
real_inputs = [materialize(x) for x in V.real_inputs]
|
| 1184 |
+
|
| 1185 |
+
with torch.utils._python_dispatch._disable_current_modes():
|
| 1186 |
+
assert self.example_inputs is not None
|
| 1187 |
+
compiled(real_inputs)
|
| 1188 |
+
del real_inputs
|
| 1189 |
+
|
| 1190 |
+
# second pass
|
| 1191 |
+
# TODO: reuse self.scheduler from the first pass to speed up the second pass
|
| 1192 |
+
self.cpp_wrapper = True
|
| 1193 |
+
self.removed_buffers.clear()
|
| 1194 |
+
self.inplaced_to_remove.clear()
|
| 1195 |
+
return self.codegen()
|
| 1196 |
+
else:
|
| 1197 |
+
# cpu
|
| 1198 |
+
return self.codegen()
|
| 1199 |
+
|
| 1200 |
+
def codegen(self):
|
| 1201 |
+
from .scheduler import Scheduler
|
| 1202 |
+
|
| 1203 |
+
self.init_wrapper_code()
|
| 1204 |
+
|
| 1205 |
+
self.scheduler = Scheduler(self.buffers)
|
| 1206 |
+
V.debug.draw_orig_fx_graph(self.orig_gm, self.scheduler.nodes)
|
| 1207 |
+
|
| 1208 |
+
self.scheduler.codegen()
|
| 1209 |
+
return self.wrapper_code.generate(self.is_inference)
|
| 1210 |
+
|
| 1211 |
+
def codegen_subgraph(self, parent_graph):
|
| 1212 |
+
"""
|
| 1213 |
+
This is a more compact version of the `codegen()` above
|
| 1214 |
+
where we codegen this graph as a subgraph of some parent
|
| 1215 |
+
graph. The parent graph is passed as an argument: the
|
| 1216 |
+
intention is to inline codegening of the subgraph in
|
| 1217 |
+
the parent graph's wrapper code (including the generated
|
| 1218 |
+
kerenls). The wrapper code is not finalized (via `.generate()`
|
| 1219 |
+
call), as this will be done in the parent graph's `codegen()`.
|
| 1220 |
+
"""
|
| 1221 |
+
from .scheduler import Scheduler
|
| 1222 |
+
|
| 1223 |
+
self.wrapper_code = parent_graph.wrapper_code
|
| 1224 |
+
self.device_ops = parent_graph.device_ops
|
| 1225 |
+
self.cpp_wrapper = parent_graph.cpp_wrapper
|
| 1226 |
+
|
| 1227 |
+
self.scheduler = Scheduler(self.buffers)
|
| 1228 |
+
self.scheduler.codegen()
|
| 1229 |
+
|
| 1230 |
+
def count_bytes(self):
|
| 1231 |
+
from .scheduler import Scheduler
|
| 1232 |
+
|
| 1233 |
+
scheduler = Scheduler(self.buffers)
|
| 1234 |
+
|
| 1235 |
+
total_bytes = 0
|
| 1236 |
+
node_counts = []
|
| 1237 |
+
node_runtimes = []
|
| 1238 |
+
for node in scheduler.nodes:
|
| 1239 |
+
num_bytes = node.get_read_write_buffers_sizes()
|
| 1240 |
+
total_bytes += num_bytes
|
| 1241 |
+
node_counts.append((node, num_bytes // 4))
|
| 1242 |
+
node_runtimes.append((node, node.get_estimated_runtime()))
|
| 1243 |
+
return total_bytes, node_counts, node_runtimes
|
| 1244 |
+
|
| 1245 |
+
@dynamo_timed(phase_name="code_gen")
|
| 1246 |
+
def compile_to_module(self):
|
| 1247 |
+
from .codecache import PyCodeCache
|
| 1248 |
+
|
| 1249 |
+
code, linemap = (
|
| 1250 |
+
self.codegen_with_cpp_wrapper() if self.cpp_wrapper else self.codegen()
|
| 1251 |
+
)
|
| 1252 |
+
linemap = [(line_no, node.stack_trace) for line_no, node in linemap]
|
| 1253 |
+
key, path = PyCodeCache.write(code)
|
| 1254 |
+
mod = PyCodeCache.load_by_key_path(
|
| 1255 |
+
key, path, linemap=linemap, attrs=self.constants
|
| 1256 |
+
)
|
| 1257 |
+
self.cache_key = key
|
| 1258 |
+
self.cache_path = path
|
| 1259 |
+
self.cache_linemap = linemap
|
| 1260 |
+
|
| 1261 |
+
# Logged twice as per https://github.com/pytorch/pytorch/pull/99038#discussion_r1167826029
|
| 1262 |
+
# TODO. Revisit this once the logging API is more mature
|
| 1263 |
+
assert mod.__file__ is not None
|
| 1264 |
+
|
| 1265 |
+
log_module_code(mod.__file__)
|
| 1266 |
+
log.debug("Output code written to: %s", mod.__file__)
|
| 1267 |
+
output_code_log.debug("Output code: \n%s", code)
|
| 1268 |
+
trace_structured(
|
| 1269 |
+
"inductor_output_code",
|
| 1270 |
+
lambda: {"filename": mod.__file__},
|
| 1271 |
+
payload_fn=lambda: code,
|
| 1272 |
+
)
|
| 1273 |
+
output_code_log.info("Output code written to: %s", mod.__file__)
|
| 1274 |
+
if config.benchmark_kernel:
|
| 1275 |
+
print(f"Compiled module path: {mod.__file__}", file=sys.stderr)
|
| 1276 |
+
V.debug.output_code(mod.__file__)
|
| 1277 |
+
V.debug.copy(os.path.splitext(mod.__file__)[0] + ".debug")
|
| 1278 |
+
return mod
|
| 1279 |
+
|
| 1280 |
+
def compile_to_fn(self):
|
| 1281 |
+
if self.aot_mode:
|
| 1282 |
+
from .codecache import AotCodeCompiler
|
| 1283 |
+
|
| 1284 |
+
assert self.cpp_wrapper, "AOT mode only supports C++ wrapper"
|
| 1285 |
+
code, linemap = self.codegen_with_cpp_wrapper()
|
| 1286 |
+
output_code_log.debug("Output code: \n%s", code)
|
| 1287 |
+
|
| 1288 |
+
serialized_extern_kernel_nodes = None
|
| 1289 |
+
if (
|
| 1290 |
+
config.is_fbcode()
|
| 1291 |
+
and self.extern_kernel_nodes
|
| 1292 |
+
and self.extern_node_serializer
|
| 1293 |
+
):
|
| 1294 |
+
serialized_extern_kernel_nodes = self.extern_node_serializer(
|
| 1295 |
+
self.extern_kernel_nodes
|
| 1296 |
+
)
|
| 1297 |
+
output_code_log.debug(
|
| 1298 |
+
"Serialized Extern Kernel Nodes: \n%s",
|
| 1299 |
+
serialized_extern_kernel_nodes,
|
| 1300 |
+
)
|
| 1301 |
+
|
| 1302 |
+
# Directly return the file path with the compiled code
|
| 1303 |
+
return AotCodeCompiler.compile(
|
| 1304 |
+
self, code, serialized_extern_kernel_nodes, cuda=self.cuda
|
| 1305 |
+
)
|
| 1306 |
+
else:
|
| 1307 |
+
return self.compile_to_module().call
|
| 1308 |
+
|
| 1309 |
+
def get_output_names(self):
|
| 1310 |
+
return [
|
| 1311 |
+
node.get_name()
|
| 1312 |
+
for node in self.graph_outputs
|
| 1313 |
+
if not isinstance(node, ir.NoneAsConstantBuffer)
|
| 1314 |
+
and not isinstance(node, ir.ShapeAsConstantBuffer)
|
| 1315 |
+
]
|
| 1316 |
+
|
| 1317 |
+
def is_unspec_arg(self, name: str):
|
| 1318 |
+
# dynamo wraps unspec variable as 0d CPU tensor,
|
| 1319 |
+
# need to convert to scalar during codegen (triton only)
|
| 1320 |
+
return (
|
| 1321 |
+
name in self.graph_inputs.keys()
|
| 1322 |
+
and self.graph_inputs[name].get_numel() == 1
|
| 1323 |
+
and self.graph_inputs[name].get_device().type == "cpu"
|
| 1324 |
+
)
|
vila/lib/python3.10/site-packages/torch/_inductor/hooks.py
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import contextlib
|
| 2 |
+
from typing import Callable, List, TYPE_CHECKING
|
| 3 |
+
|
| 4 |
+
if TYPE_CHECKING:
|
| 5 |
+
import torch
|
| 6 |
+
|
| 7 |
+
# Executed in the order they're registered
|
| 8 |
+
INTERMEDIATE_HOOKS: List[Callable[[str, "torch.Tensor"], None]] = []
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
@contextlib.contextmanager
|
| 12 |
+
def intermediate_hook(fn):
|
| 13 |
+
INTERMEDIATE_HOOKS.append(fn)
|
| 14 |
+
try:
|
| 15 |
+
yield
|
| 16 |
+
finally:
|
| 17 |
+
INTERMEDIATE_HOOKS.pop()
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def run_intermediate_hooks(name, val):
|
| 21 |
+
global INTERMEDIATE_HOOKS
|
| 22 |
+
hooks = INTERMEDIATE_HOOKS
|
| 23 |
+
INTERMEDIATE_HOOKS = []
|
| 24 |
+
try:
|
| 25 |
+
for hook in hooks:
|
| 26 |
+
hook(name, val)
|
| 27 |
+
finally:
|
| 28 |
+
INTERMEDIATE_HOOKS = hooks
|
vila/lib/python3.10/site-packages/torch/_inductor/index_propagation.py
ADDED
|
@@ -0,0 +1,277 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""This file implements the IndexPropagation ops handler, which wraps an
|
| 2 |
+
underlying handler to add a limited form of constant propagation, as well as
|
| 3 |
+
propagation of sympy expressions downstream of ops.index_expr calls.
|
| 4 |
+
|
| 5 |
+
For example, say we have the IR:
|
| 6 |
+
|
| 7 |
+
tmp0 = ops.index_expr(x, torch.int32)
|
| 8 |
+
tmp1 = ops.constant(2, torch.int32)
|
| 9 |
+
tmp2 = ops.mul(tmp0, tmp1)
|
| 10 |
+
tmp3 = ops.indirect_indexing(tmp2, x_size)
|
| 11 |
+
tmp4 = ops.load("buf0", tmp3)
|
| 12 |
+
|
| 13 |
+
The underlying handler would just see:
|
| 14 |
+
|
| 15 |
+
ops.load("buf0", x * 2)
|
| 16 |
+
|
| 17 |
+
This is limited by the set of operators handled in the sympy expression
|
| 18 |
+
printers. So simple operations like minimum and maximum cannot be translated to
|
| 19 |
+
SymPy expressions yet, despite sympy.Min and sympy.Max existing.
|
| 20 |
+
|
| 21 |
+
"""
|
| 22 |
+
import itertools
|
| 23 |
+
from dataclasses import dataclass
|
| 24 |
+
from typing import Any, Callable, Dict, Literal, Optional, overload, Tuple, Union
|
| 25 |
+
|
| 26 |
+
import sympy
|
| 27 |
+
|
| 28 |
+
from typing_extensions import TypeAlias
|
| 29 |
+
|
| 30 |
+
import torch
|
| 31 |
+
from torch._prims_common import is_boolean_dtype, is_integer_dtype
|
| 32 |
+
from torch.utils._sympy.functions import FloorDiv, ModularIndexing, Where
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
@dataclass
|
| 36 |
+
class TypedExpr:
|
| 37 |
+
"""A SymPy expression with associated type"""
|
| 38 |
+
|
| 39 |
+
expr: sympy.Expr
|
| 40 |
+
dtype: torch.dtype
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
class SymPyOps:
|
| 44 |
+
"""An ops handler where all IR values are SymPy expressions
|
| 45 |
+
|
| 46 |
+
When a value cannot be represented as a SymPy expression, the method is
|
| 47 |
+
either not defined, or returns NotImplemented
|
| 48 |
+
|
| 49 |
+
"""
|
| 50 |
+
|
| 51 |
+
@staticmethod
|
| 52 |
+
def identity(value: Any) -> Any:
|
| 53 |
+
return value
|
| 54 |
+
|
| 55 |
+
@staticmethod
|
| 56 |
+
def constant(value: Union[int, float, bool], dtype: torch.dtype) -> TypedExpr:
|
| 57 |
+
if is_boolean_dtype(dtype):
|
| 58 |
+
expr = sympy.Integer(bool(value))
|
| 59 |
+
elif is_integer_dtype(dtype):
|
| 60 |
+
expr = sympy.Integer(int(value))
|
| 61 |
+
else:
|
| 62 |
+
expr = sympy.Float(float(value))
|
| 63 |
+
return TypedExpr(expr, dtype)
|
| 64 |
+
|
| 65 |
+
@staticmethod
|
| 66 |
+
def index_expr(value: sympy.Expr, dtype: torch.dtype) -> Union[int, TypedExpr]:
|
| 67 |
+
if isinstance(value, int):
|
| 68 |
+
value = sympy.Integer(value)
|
| 69 |
+
return TypedExpr(value, dtype)
|
| 70 |
+
|
| 71 |
+
@staticmethod
|
| 72 |
+
def to_dtype(
|
| 73 |
+
value: Any, dtype: torch.dtype, src_dtype: Optional[torch.dtype] = None
|
| 74 |
+
) -> Union[int, TypedExpr]:
|
| 75 |
+
if isinstance(value.expr, (sympy.Integer, sympy.Float)):
|
| 76 |
+
return SymPyOps.constant(value.expr, dtype)
|
| 77 |
+
elif is_integer_dtype(dtype) and is_integer_dtype(value.dtype):
|
| 78 |
+
return SymPyOps.index_expr(value.expr, dtype)
|
| 79 |
+
else:
|
| 80 |
+
# TODO: Inductor doesn't handle floating point in sympy expressions well at the moment
|
| 81 |
+
return NotImplemented
|
| 82 |
+
|
| 83 |
+
@staticmethod
|
| 84 |
+
def square(x: TypedExpr) -> TypedExpr:
|
| 85 |
+
return TypedExpr(x.expr * x.expr, x.dtype)
|
| 86 |
+
|
| 87 |
+
@staticmethod
|
| 88 |
+
def add(x: TypedExpr, y: TypedExpr) -> TypedExpr:
|
| 89 |
+
result_type = torch.promote_types(x.dtype, y.dtype)
|
| 90 |
+
return TypedExpr(x.expr + y.expr, result_type)
|
| 91 |
+
|
| 92 |
+
@staticmethod
|
| 93 |
+
def sub(x: TypedExpr, y: TypedExpr) -> TypedExpr:
|
| 94 |
+
result_type = torch.promote_types(x.dtype, y.dtype)
|
| 95 |
+
return TypedExpr(x.expr - y.expr, result_type)
|
| 96 |
+
|
| 97 |
+
@staticmethod
|
| 98 |
+
def mul(x: TypedExpr, y: TypedExpr) -> TypedExpr:
|
| 99 |
+
result_type = torch.promote_types(x.dtype, y.dtype)
|
| 100 |
+
return TypedExpr(x.expr * y.expr, result_type)
|
| 101 |
+
|
| 102 |
+
@staticmethod
|
| 103 |
+
def neg(x: TypedExpr) -> TypedExpr:
|
| 104 |
+
return TypedExpr(-x.expr, x.dtype)
|
| 105 |
+
|
| 106 |
+
@staticmethod
|
| 107 |
+
def floordiv(x: TypedExpr, y: TypedExpr) -> TypedExpr:
|
| 108 |
+
result_type = torch.promote_types(x.dtype, y.dtype)
|
| 109 |
+
if not is_integer_dtype(result_type):
|
| 110 |
+
return NotImplemented
|
| 111 |
+
|
| 112 |
+
return TypedExpr(FloorDiv(x.expr, y.expr), result_type)
|
| 113 |
+
|
| 114 |
+
@staticmethod
|
| 115 |
+
def mod(x: TypedExpr, y: TypedExpr) -> Optional[TypedExpr]:
|
| 116 |
+
result_type = torch.promote_types(x.dtype, y.dtype)
|
| 117 |
+
if not is_integer_dtype(result_type):
|
| 118 |
+
return NotImplemented
|
| 119 |
+
|
| 120 |
+
result_expr = ModularIndexing(x.expr, sympy.Integer(1), y.expr)
|
| 121 |
+
return TypedExpr(result_expr, result_type)
|
| 122 |
+
|
| 123 |
+
@staticmethod
|
| 124 |
+
def remainder(x: TypedExpr, y: TypedExpr) -> Optional[TypedExpr]:
|
| 125 |
+
result_type = torch.promote_types(x.dtype, y.dtype)
|
| 126 |
+
if not is_integer_dtype(result_type):
|
| 127 |
+
return NotImplemented
|
| 128 |
+
# In these cases, remainder in Python == remainder in C++, so this transformation
|
| 129 |
+
# is sound
|
| 130 |
+
if (
|
| 131 |
+
x.expr.is_nonnegative is not None
|
| 132 |
+
and x.expr.is_nonnegative == y.expr.is_positive
|
| 133 |
+
):
|
| 134 |
+
result_expr = ModularIndexing(x.expr, sympy.Integer(1), y.expr)
|
| 135 |
+
return TypedExpr(result_expr, result_type)
|
| 136 |
+
return NotImplemented
|
| 137 |
+
|
| 138 |
+
@staticmethod
|
| 139 |
+
def minimum(x: TypedExpr, y: TypedExpr) -> TypedExpr:
|
| 140 |
+
result_type = torch.promote_types(x.dtype, y.dtype)
|
| 141 |
+
return TypedExpr(sympy.Min(x.expr, y.expr), result_type)
|
| 142 |
+
|
| 143 |
+
@staticmethod
|
| 144 |
+
def maximum(x: TypedExpr, y: TypedExpr) -> TypedExpr:
|
| 145 |
+
result_type = torch.promote_types(x.dtype, y.dtype)
|
| 146 |
+
return TypedExpr(sympy.Max(x.expr, y.expr), result_type)
|
| 147 |
+
|
| 148 |
+
|
| 149 |
+
@dataclass
|
| 150 |
+
class IndexPropVar:
|
| 151 |
+
value: Any # Either an IR value, or TypedExpr if is_symbolic is true
|
| 152 |
+
is_symbolic: bool = False
|
| 153 |
+
|
| 154 |
+
@staticmethod
|
| 155 |
+
def new_symbolic(expr: TypedExpr) -> "IndexPropVar":
|
| 156 |
+
return IndexPropVar(expr, is_symbolic=True)
|
| 157 |
+
|
| 158 |
+
def __post_init__(self):
|
| 159 |
+
assert not self.is_symbolic or isinstance(
|
| 160 |
+
self.value, TypedExpr
|
| 161 |
+
), "Symbolic IndexPropVar must contain a TypedExpr"
|
| 162 |
+
|
| 163 |
+
|
| 164 |
+
IndexPropResult: TypeAlias = Union[IndexPropVar, Tuple["IndexPropResult", ...]]
|
| 165 |
+
|
| 166 |
+
|
| 167 |
+
class IndexPropagation:
    """Ops wrapper that tries to propagate constant and index_expr values through the computation.

    This aims to maximize the compile time simplification possible, and convert
    indirect indexing from arange into normal static indexing.
    """

    def __init__(self, inner: Any):
        # The wrapped ops handler; anything we cannot simplify symbolically
        # is forwarded to it via fallback().
        self._inner = inner

    def materialize_expr(self, expr: sympy.Expr, dtype: torch.dtype) -> Any:
        # Construct a new constant/index_expr from the SymPy expression
        if isinstance(expr, sympy.Integer):
            return self._inner.constant(int(expr), dtype)
        elif expr.is_number:
            # Non-integer numeric constants become float constants.
            return self._inner.constant(float(expr), dtype)
        return self._inner.index_expr(expr, dtype)

    def unwrap(self, a: Union[Any, IndexPropVar]) -> Any:
        """Convert an IndexPropVar (or nested tuple of them) back into inner-handler values."""
        if isinstance(a, (list, tuple)):
            return tuple(self.unwrap(v) for v in a)

        if not isinstance(a, IndexPropVar):
            return a

        # Prefer the sympy representation if possible
        if a.is_symbolic:
            return self.materialize_expr(a.value.expr, a.value.dtype)

        return a.value

    def wrap(self, a) -> IndexPropResult:
        """Wrap inner-handler values (or nested tuples) into IndexPropVar results."""
        if isinstance(a, (list, tuple)):
            return tuple(self.wrap(v) for v in a)
        return IndexPropVar(a)

    @overload
    def fallback(
        self,
        name: Literal["indirect_indexing"],
        args: Tuple[Any, ...],
        kwargs: Dict[str, Any],
    ) -> IndexPropVar:
        ...

    @overload
    def fallback(
        self, name: str, args: Tuple[Any, ...], kwargs: Dict[str, Any]
    ) -> IndexPropResult:
        ...

    def fallback(
        self, name: str, args: Tuple[Any, ...], kwargs: Dict[str, Any]
    ) -> IndexPropResult:
        # Fallback to the wrapped handler
        new_args = [self.unwrap(a) for a in args]
        new_kwargs = {k: self.unwrap(v) for k, v in kwargs.items()}
        return self.wrap(getattr(self._inner, name)(*new_args, **new_kwargs))

    def propagate_sympy(
        self, name: str, args: Tuple[Any, ...], kwargs: Dict[str, Any]
    ) -> IndexPropResult:
        # Build a new SymPy expression from this ops call
        def unwrap(a: Union[Any, IndexPropVar]) -> Any:
            if not isinstance(a, IndexPropVar):
                return a
            return a.value

        new_args = [unwrap(a) for a in args]
        new_kwargs = {k: unwrap(v) for k, v in kwargs.items()}
        new_expr = getattr(SymPyOps, name)(*new_args, **new_kwargs)
        is_valid_expr = new_expr is not NotImplemented and (
            # Inductor doesn't expect floating point in sympy expressions, but
            # allow floating point constants to be propagated
            isinstance(new_expr.expr, sympy.Number)
            or new_expr.expr.is_integer
        )
        if not is_valid_expr:
            return self.fallback(name, args, kwargs)
        return IndexPropVar.new_symbolic(new_expr)

    def __getattr__(self, name: str) -> Callable[..., IndexPropResult]:
        # Dispatch hook: any op that SymPyOps can model AND whose IndexPropVar
        # arguments are all symbolic is propagated symbolically; everything
        # else falls back to the wrapped handler.
        def inner(*args: Any, **kwargs: Any) -> IndexPropResult:
            if not hasattr(SymPyOps, name):
                return self.fallback(name, args, kwargs)

            var_arguments = [
                a
                for a in itertools.chain(args, kwargs.values())
                if isinstance(a, IndexPropVar)
            ]
            if not all(v.is_symbolic for v in var_arguments):
                return self.fallback(name, args, kwargs)

            return self.propagate_sympy(name, args, kwargs)

        return inner

    def indirect_indexing(
        self, index: Union[Any, IndexPropVar], size: Any, check: bool = True
    ) -> Any:
        # nb. We do index + Where(...) rather than Where(idx >= 0, idx, idx + sz) because we don't have CSE
        # for SymPy expressions, so we don't want to repeat idx too much

        # indirect_indexing returns a sympy value, so no need to wrap in IndexPropVar here
        if isinstance(index, IndexPropVar) and index.is_symbolic:
            # If we are turning a indirect indexing into direct, we need to wrap it.
            index = index.value.expr
            # Negative indices wrap around: add `size` only when index < 0.
            return index + Where(index >= 0, 0, size)
        return self.fallback("indirect_indexing", (index, size, check), {}).value
vila/lib/python3.10/site-packages/torch/_inductor/ir.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
vila/lib/python3.10/site-packages/torch/_inductor/metrics.py
ADDED
|
@@ -0,0 +1,419 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import csv
|
| 4 |
+
import inspect
|
| 5 |
+
import os
|
| 6 |
+
import re
|
| 7 |
+
from dataclasses import dataclass
|
| 8 |
+
from functools import lru_cache
|
| 9 |
+
|
| 10 |
+
from typing import Dict, List, Set, Tuple, TYPE_CHECKING, Union
|
| 11 |
+
|
| 12 |
+
from torch._inductor import config
|
| 13 |
+
from torch._inductor.utils import get_benchmark_name
|
| 14 |
+
|
| 15 |
+
# Prevent circular import
|
| 16 |
+
if TYPE_CHECKING:
|
| 17 |
+
from torch._inductor.scheduler import (
|
| 18 |
+
BaseSchedulerNode,
|
| 19 |
+
ExternKernelSchedulerNode,
|
| 20 |
+
NopKernelSchedulerNode,
|
| 21 |
+
SchedulerNode,
|
| 22 |
+
)
|
| 23 |
+
|
| 24 |
+
# counter for tracking how many kernels have been generated
generated_kernel_count = 0
generated_cpp_vec_kernel_count = 0
num_bytes_accessed = 0
# NOTE(review): appears to collect (scheduler node, element count) pairs —
# confirm against the scheduler call sites.
nodes_num_elem: List[
    Tuple[
        Union[NopKernelSchedulerNode, SchedulerNode, ExternKernelSchedulerNode],
        int,
    ]
] = []
# (scheduler node, estimated runtime) pairs — presumably filled during scheduling.
node_runtimes: List[Tuple[BaseSchedulerNode, float]] = []

# counters for tracking fusions
ir_nodes_pre_fusion = 0

# counters for tracking to_dtype inserted
cpp_to_dtype_count = 0

# counters for tracking cpp_wrapper disabled
disable_cpp_wrapper = 0
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
# reset all counters
|
| 47 |
+
def reset():
    """Reset every metric counter and accumulator to its initial state."""
    global generated_kernel_count, generated_cpp_vec_kernel_count
    global num_bytes_accessed, ir_nodes_pre_fusion
    global cpp_to_dtype_count, disable_cpp_wrapper

    generated_kernel_count = 0
    generated_cpp_vec_kernel_count = 0
    num_bytes_accessed = 0
    # Lists are mutated in place so existing references stay valid.
    nodes_num_elem.clear()
    node_runtimes.clear()
    ir_nodes_pre_fusion = 0
    cpp_to_dtype_count = 0
    disable_cpp_wrapper = 0
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
@dataclass
class CachedMetricsDeltas:
    """
    The subset of metrics we want to update across cache hits, e.g., the
    FxGraphCache.
    """

    # Each field is a delta (difference), not an absolute counter value.
    generated_kernel_count: int
    generated_cpp_vec_kernel_count: int
    ir_nodes_pre_fusion: int
    cpp_to_dtype_count: int
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
class CachedMetricsHelper:
    """
    A helper class to help calculate and apply counter deltas for those
    metrics we want to save with cache entries (e.g., FxGraphCache) and
    apply on a cache hit.
    """

    def __init__(self):
        # Snapshot the current values of the cached metrics; get_deltas()
        # later reports how far each global counter has moved since here.
        self.generated_kernel_count = generated_kernel_count
        self.generated_cpp_vec_kernel_count = generated_cpp_vec_kernel_count
        self.ir_nodes_pre_fusion = ir_nodes_pre_fusion
        self.cpp_to_dtype_count = cpp_to_dtype_count

    def get_deltas(self) -> CachedMetricsDeltas:
        """Return how much each tracked counter changed since construction."""
        return CachedMetricsDeltas(
            generated_kernel_count - self.generated_kernel_count,
            generated_cpp_vec_kernel_count - self.generated_cpp_vec_kernel_count,
            ir_nodes_pre_fusion - self.ir_nodes_pre_fusion,
            cpp_to_dtype_count - self.cpp_to_dtype_count,
        )

    @staticmethod
    def apply_deltas(delta: CachedMetricsDeltas):
        """Add previously-saved deltas onto the global counters (cache hit path)."""
        global generated_kernel_count, generated_cpp_vec_kernel_count
        global ir_nodes_pre_fusion, cpp_to_dtype_count

        generated_kernel_count += delta.generated_kernel_count
        generated_cpp_vec_kernel_count += delta.generated_cpp_vec_kernel_count
        ir_nodes_pre_fusion += delta.ir_nodes_pre_fusion
        cpp_to_dtype_count += delta.cpp_to_dtype_count
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
# Global registry of metric tables, keyed by table name (see MetricTable.register_table).
REGISTERED_METRIC_TABLES: Dict[str, MetricTable] = {}
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
@dataclass
class MetricTable:
    """A named CSV metric table.

    Rows are appended to ``metric_table_<table_name>.csv``; the first column
    is always the benchmark (model) name, followed by ``column_names``.
    """

    table_name: str
    column_names: List[str]

    # Rows written by this instance; used to decide whether the header
    # still needs to be written lazily in _write_row().
    num_rows_added: int = 0

    def add_row(self, row_fn):
        """Append one row; row_fn is called lazily only if the table is enabled."""
        if self.table_name not in enabled_metric_tables():
            return

        row_dict = row_fn()
        assert len(self.column_names) == len(
            row_dict
        ), f"{len(self.column_names)} v.s. {len(row_dict)}"
        assert set(self.column_names) == set(
            row_dict.keys()
        ), f"{set(self.column_names)} v.s. {set(row_dict.keys())}"

        row = [
            get_benchmark_name(),
        ]
        # Emit values in declared column order, not dict order.
        row += [row_dict[column_name] for column_name in self.column_names]
        self._write_row(row)

    def output_filename(self):
        """Return the CSV filename for this table."""
        return f"metric_table_{self.table_name}.csv"

    def write_header(self):
        """(Over)write the CSV file with just the header row."""
        filename = self.output_filename()
        with open(filename, "w") as fd:
            writer = csv.writer(fd, lineterminator="\n")
            writer.writerow(["model_name"] + self.column_names)

    def _write_row(self, row):
        filename = self.output_filename()
        # Lazily create the header on the first row if no file exists yet
        # (purge_old_log_files may already have written one).
        if self.num_rows_added == 0 and not os.path.exists(filename):
            self.write_header()

        self.num_rows_added += 1

        # Normalize cell values in place: fixed precision for floats,
        # empty string for None.
        for idx, orig_val in enumerate(row):
            if isinstance(orig_val, float):
                new_val = f"{orig_val:.6f}"
            elif orig_val is None:
                new_val = ""
            else:
                new_val = orig_val
            row[idx] = new_val

        with open(filename, "a") as fd:
            writer = csv.writer(fd, lineterminator="\n")
            writer.writerow(row)

    @staticmethod
    def register_table(name, column_names):
        """Create a MetricTable and record it in REGISTERED_METRIC_TABLES."""
        table = MetricTable(name, column_names)
        REGISTERED_METRIC_TABLES[name] = table
|
| 183 |
+
|
| 184 |
+
|
| 185 |
+
# Track fusions whose fused kernel runs slower than the separate parts.
MetricTable.register_table(
    "slow_fusion",
    [
        "kernel1_path",
        "kernel1_latency",
        "kernel2_path",
        "kernel2_latency",
        "fused_kernel_path",
        "fused_kernel_latency",
        "slow_down_ratio",
    ],
)

# track the fusion statistics for each graph
MetricTable.register_table(
    "graph_stats",
    [
        "graph_id",
        "num_nodes_before_fusion",
        "num_nodes_after_fusion",
    ],
)

# track the perf difference between persistent reduction and non-persistent
# reductions
MetricTable.register_table(
    "persistent_red_perf",
    [
        "kernel1_name",
        "kernel2_name",
        "kernel1_latency",
        "kernel2_latency",
        "size_hints",
        "reduction_hint",
        "speedup",
    ],
)

# Log metadata for pointwise/reduction kernels. E.g., model name, kernel path, numel, rnumel, reduction hint
MetricTable.register_table(
    "kernel_metadata",
    [
        "kernel_name",
        "kernel_path",
        "kernel_category",  # pointwise/reduction/foreach etc.
        "size_hints",
        "reduction_hint",
        "line_of_code",
        "num_load",
        "num_store",
        "num_for_loop",
        "num_atomic_add",
        "num_args",
        # xyz numel can be different to size_hints since size_hints are rounded
        # up to the nearest power of 2.
        # Inductor kernel will burn in the xyz numel in kernel code for static
        # shape kernels.
        # Logging them will be helpful to find unaligned shape for reduction
        "xnumel",
        "ynumel",
        "rnumel",
        "kernel_args_num_gb",
    ],
)
|
| 249 |
+
|
| 250 |
+
|
| 251 |
+
def _parse_kernel_fn_code(kernel_module_code):
    """
    The kernel_module_code is the python module that contains kernel function code.
    kernel function is the proper triton kernel function annotated with
    @triton.jit
    """
    # Local imports — presumably to avoid circular imports at module load
    # time (see the TYPE_CHECKING guard at the top of this file); confirm.
    from .codecache import PyCodeCache
    from .wrapper_benchmark import get_triton_kernel

    mod = PyCodeCache.load(kernel_module_code)
    kernel = get_triton_kernel(mod)
    # kernel is a CachingAutotune; kernel.fn is the JITFunction;
    # kernel.fn.fn is the function being decorate by triton.jit
    return inspect.getsource(kernel.fn.fn)
|
| 265 |
+
|
| 266 |
+
|
| 267 |
+
def _parse_kernel_line_of_code(proper_kernel_fn_code):
|
| 268 |
+
"""
|
| 269 |
+
Return the line of code for the kernel excluding the decorators.
|
| 270 |
+
"""
|
| 271 |
+
return len(proper_kernel_fn_code.splitlines())
|
| 272 |
+
|
| 273 |
+
|
| 274 |
+
def _parse_size_hints(kernel_module_code, kernel_category):
|
| 275 |
+
if kernel_category == "foreach":
|
| 276 |
+
# foreach kernel does not have size_hints
|
| 277 |
+
return None
|
| 278 |
+
m = re.search(r"size_hints=(\[[0-9, ]*\]),", kernel_module_code)
|
| 279 |
+
assert m, "size_hints missing!"
|
| 280 |
+
return m.group(1)
|
| 281 |
+
|
| 282 |
+
|
| 283 |
+
def _parse_reduction_hint(kernel_category, kernel_module_code):
|
| 284 |
+
if kernel_category not in ("reduction", "persistent_reduction"):
|
| 285 |
+
return None
|
| 286 |
+
m = re.search(r"reduction_hint=ReductionHint\.(\w*),", kernel_module_code)
|
| 287 |
+
assert m, "reduction_hint not found in kernel source code!"
|
| 288 |
+
return m.group(1)
|
| 289 |
+
|
| 290 |
+
|
| 291 |
+
def _count_pattern(proper_kernel_fn_code, pattern):
    """Count occurrences of a plain substring (e.g. "tl.load") in the kernel body."""
    return proper_kernel_fn_code.count(pattern)
|
| 293 |
+
|
| 294 |
+
|
| 295 |
+
def _count_args(proper_kernel_fn_code):
|
| 296 |
+
def_line = proper_kernel_fn_code.splitlines()[0]
|
| 297 |
+
assert def_line.startswith("def ")
|
| 298 |
+
start_idx = def_line.index("(")
|
| 299 |
+
end_idx = def_line.index("):")
|
| 300 |
+
decl_csv = def_line[start_idx + 1 : end_idx]
|
| 301 |
+
comps = decl_csv.split(",")
|
| 302 |
+
return len(comps)
|
| 303 |
+
|
| 304 |
+
|
| 305 |
+
def _parse_proper_kernel_fn_code(kernel_fn_code):
|
| 306 |
+
"""
|
| 307 |
+
Skip decorators.
|
| 308 |
+
"""
|
| 309 |
+
start_pos = kernel_fn_code.index("def ")
|
| 310 |
+
return kernel_fn_code[start_pos:]
|
| 311 |
+
|
| 312 |
+
|
| 313 |
+
def _parse_numel(proper_kernel_fn_code, numel_arg_name):
|
| 314 |
+
m = re.search(f"{numel_arg_name} = ([\\d]+)", proper_kernel_fn_code)
|
| 315 |
+
if m:
|
| 316 |
+
return int(m.group(1))
|
| 317 |
+
else:
|
| 318 |
+
return None
|
| 319 |
+
|
| 320 |
+
|
| 321 |
+
def _parse_kernel_args_num_gb(kernel_fn_code, kernel_category):
|
| 322 |
+
"""
|
| 323 |
+
inductor meta looks like:
|
| 324 |
+
inductor_meta={... 'mutated_arg_names': [], 'no_x_dim': False, 'kernel_num_gb': 2.0},
|
| 325 |
+
"""
|
| 326 |
+
m = re.search(r".kernel_num_gb.:\s*([0-9.]+)", kernel_fn_code)
|
| 327 |
+
if m:
|
| 328 |
+
return float(m.group(1))
|
| 329 |
+
else:
|
| 330 |
+
"""
|
| 331 |
+
There are a few cases that kernel_num_gdb field can be missing:
|
| 332 |
+
1. the field will be missing if config.benchmark_kernel and
|
| 333 |
+
config.profile_bandwidth are false
|
| 334 |
+
2. even if config.benchmark_kernel or config.profile_bandwidth is true.
|
| 335 |
+
foreach kernel does not have kernel_num_gb field in the metadata
|
| 336 |
+
"""
|
| 337 |
+
return None
|
| 338 |
+
|
| 339 |
+
|
| 340 |
+
def log_kernel_metadata(kernel_name, kernel_path, kernel_module_code):
    """
    An utility to log kernel metadata. We may parse metadata from kernel source code here.

    It's fine to parse the generated kernel code here since the logging is
    disabled by default. It would hurt compilation time.
    """
    # Local import — presumably to avoid a circular import; confirm.
    from .wrapper_benchmark import get_kernel_category_by_source_code

    kernel_category = get_kernel_category_by_source_code(kernel_module_code)
    reduction_hint = _parse_reduction_hint(kernel_category, kernel_module_code)
    size_hints = _parse_size_hints(kernel_module_code, kernel_category)
    # Source of the @triton.jit-annotated function, decorators included.
    kernel_fn_code = _parse_kernel_fn_code(kernel_module_code)

    proper_kernel_fn_code = _parse_proper_kernel_fn_code(kernel_fn_code)

    # the line of code excluding the decorators
    kernel_line_of_code = _parse_kernel_line_of_code(proper_kernel_fn_code)

    # Row is built lazily: the lambda only runs if the table is enabled.
    get_metric_table("kernel_metadata").add_row(
        lambda: {
            "kernel_name": kernel_name,
            "kernel_path": kernel_path,
            "kernel_category": kernel_category,
            "size_hints": size_hints,
            "reduction_hint": reduction_hint,
            "line_of_code": kernel_line_of_code,
            "num_load": _count_pattern(proper_kernel_fn_code, "tl.load"),
            "num_store": _count_pattern(proper_kernel_fn_code, "tl.store"),
            "num_for_loop": _count_pattern(proper_kernel_fn_code, "for "),
            "num_atomic_add": _count_pattern(proper_kernel_fn_code, "tl.atomic_add"),
            "num_args": _count_args(proper_kernel_fn_code),
            "xnumel": _parse_numel(proper_kernel_fn_code, "xnumel"),
            "ynumel": _parse_numel(proper_kernel_fn_code, "ynumel"),
            "rnumel": _parse_numel(proper_kernel_fn_code, "rnumel"),
            "kernel_args_num_gb": _parse_kernel_args_num_gb(
                kernel_fn_code, kernel_category
            ),
        }
    )
|
| 380 |
+
|
| 381 |
+
|
| 382 |
+
def purge_old_log_files():
    """
    Purge the old log file at the beginning when the benchmark script runs.
    Should do it in the parent process rather than the child processes running
    each individual model.
    """
    for name, table in REGISTERED_METRIC_TABLES.items():
        if name not in enabled_metric_tables():
            continue
        filename = table.output_filename()
        if os.path.exists(filename):
            os.unlink(filename)

        # Start each enabled table fresh with just its header row.
        table.write_header()
|
| 395 |
+
|
| 396 |
+
|
| 397 |
+
@lru_cache
def enabled_metric_tables() -> Set[str]:
    """Parse config.enabled_metric_tables (comma-separated) into a set of names.

    Cached for the lifetime of the process.
    """
    enabled = set()
    for raw_name in config.enabled_metric_tables.split(","):
        name = raw_name.strip()
        if not name:
            continue
        assert (
            name in REGISTERED_METRIC_TABLES
        ), f"Metric table name {name} is not registered"
        enabled.add(name)
    return enabled
|
| 411 |
+
|
| 412 |
+
|
| 413 |
+
def is_metric_table_enabled(name):
    """Return True if the named metric table is enabled via config."""
    return name in enabled_metric_tables()
|
| 415 |
+
|
| 416 |
+
|
| 417 |
+
def get_metric_table(name):
    """Look up a registered MetricTable by name (AssertionError if unknown)."""
    assert name in REGISTERED_METRIC_TABLES, f"Metric table {name} is not defined"
    return REGISTERED_METRIC_TABLES[name]
|
vila/lib/python3.10/site-packages/torch/_inductor/ops_handler.py
ADDED
|
@@ -0,0 +1,655 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import itertools
|
| 2 |
+
from typing import Any, Callable, Generic, Literal, Optional, Tuple, TypeVar, Union
|
| 3 |
+
from unittest.mock import patch
|
| 4 |
+
|
| 5 |
+
import sympy
|
| 6 |
+
from typing_extensions import Protocol
|
| 7 |
+
|
| 8 |
+
import torch
|
| 9 |
+
import torch.utils._pytree as pytree
|
| 10 |
+
from torch.fx.graph import inplace_methods, magic_methods
|
| 11 |
+
from .utils import IndentedBuffer, reduction_num_outputs, sympy_index_symbol, sympy_str
|
| 12 |
+
|
| 13 |
+
# Domain type of an ops handler: what every op takes and returns.
T = TypeVar("T")
# Mode for ops.store: None is a plain store; "atomic_add" requests an atomic add.
StoreMode = Optional[Literal["atomic_add"]]
# Closed set of reduction kinds accepted by ops.reduction and friends.
ReductionType = Literal[
    "argmax",
    "argmin",
    "welford_reduce",
    "welford_combine",
    "any",
    "max",
    "min",
    "prod",
    "sum",
    "xor_sum",
]
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def _arg_str(a) -> str:
    """Render an op argument for display: sympy expressions via sympy_str, else str()."""
    return sympy_str(a) if isinstance(a, sympy.Expr) else str(a)
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
# NB: This is not done as a parent class, because our ops handlers
|
| 36 |
+
# implementations make heavy use of __getattr__ magic, and pre-existing
|
| 37 |
+
# stubs for methods would interfere with this mechanism.
|
| 38 |
+
#
|
| 39 |
+
# TODO: A superclass that does desugaring for operations like
|
| 40 |
+
# reciprocal/square might be useful.
|
| 41 |
+
class OpsHandler(Protocol[T]):
|
| 42 |
+
"""
|
| 43 |
+
Protocol describing the set of valid operations on ``torch._inductor.virtualized.ops``,
|
| 44 |
+
as well as the contract for op handlers. The type T signifies the domain
|
| 45 |
+
of the abstract analysis AKA what all of the functions return / take as arguments
|
| 46 |
+
anywhere compute occurs.
|
| 47 |
+
|
| 48 |
+
While these operators are typically dtype polymorphic (e.g., you can use mul
|
| 49 |
+
on both integers and floats), they do NOT do promotion and usually return the
|
| 50 |
+
same dtype as the input. You are expected to have handled type promotion
|
| 51 |
+
during ATen decompositions. Most operators correspond exactly to pointwise
|
| 52 |
+
operations as defined by torch, so when in doubt about semantics, check the
|
| 53 |
+
corresponding torch documentation. These are all scalar operations (so they
|
| 54 |
+
are defined to operate on a single element at a time.)
|
| 55 |
+
|
| 56 |
+
For convenience, many operators take a src_dtype which indicates what the dtype
|
| 57 |
+
of the input argument is. Although in principle this can be derived by an
|
| 58 |
+
analysis, providing this for ops where it is useful helps avoid having to repeatedly
|
| 59 |
+
recompute dtype in code generation.
|
| 60 |
+
|
| 61 |
+
Note that this often describes a class of static methods, for stateless
|
| 62 |
+
ops handlers.
|
| 63 |
+
|
| 64 |
+
Handlers are often defined using ``__getattr__`` metaprogramming, which means
|
| 65 |
+
that you cannot declare that a type implements a protocol by inheriting from
|
| 66 |
+
it (as the type stubs count as attribute declarations and impede the getattr
|
| 67 |
+
magic method from being called). Instead, define a function that casts an
|
| 68 |
+
argument of your type to the protocol, which is sufficient to induce mypy to
|
| 69 |
+
test that the protocol is implemented correctly. Search for ``_typecheck_``
|
| 70 |
+
in this file to see some examples. If you see an obscure error where a
|
| 71 |
+
class doesn't implement a Protocol, but mypy doesn't say why, check to see
|
| 72 |
+
that ``__getattr__`` is typed correctly (typically, it is not possible to
|
| 73 |
+
type ``__getattr__`` without typing it as ``Callable[..., Any]``)
|
| 74 |
+
"""
|
| 75 |
+
|
| 76 |
+
def constant(self, value: Union[bool, float, int], dtype: torch.dtype) -> T:
|
| 77 |
+
"""Produces a scalar constant of type dtype."""
|
| 78 |
+
...
|
| 79 |
+
|
| 80 |
+
def load_seed(self, name: str, offset: T):
|
| 81 |
+
"""Computes inductor_prims.lookup_seed."""
|
| 82 |
+
...
|
| 83 |
+
|
| 84 |
+
def rand(self, seed: T, offset: T) -> T:
|
| 85 |
+
"""Computes inductor_prims.random with mode="rand". offset has dtype int32."""
|
| 86 |
+
...
|
| 87 |
+
|
| 88 |
+
def randn(self, seed: T, offset: T) -> T:
|
| 89 |
+
"""Computes inductor_prims.random with mode="randn". offset has dtype int32."""
|
| 90 |
+
...
|
| 91 |
+
|
| 92 |
+
def randint64(self, seed: T, offset: T, low: T, high: T) -> T:
|
| 93 |
+
"""Computes inductor_prims.randint. offset has dtype int32."""
|
| 94 |
+
...
|
| 95 |
+
|
| 96 |
+
def masked(self, mask: T, body: Callable[[], T], other: T) -> T:
|
| 97 |
+
"""
|
| 98 |
+
Computes body, but only perform loads/stores if the boolean mask
|
| 99 |
+
evaluates to true. For example, you would use this if you needed to
|
| 100 |
+
perform an indirect load that may not be valid on some elements;
|
| 101 |
+
without masking, invalid accesses can cause IMAs. When mask is true,
|
| 102 |
+
the result is the result of body; otherwise it is other.
|
| 103 |
+
|
| 104 |
+
Contrast this with ops.where, which can multiplex between two values
|
| 105 |
+
that have been unconditionally computed.
|
| 106 |
+
"""
|
| 107 |
+
...
|
| 108 |
+
|
| 109 |
+
def where(self, condition: T, input: T, other: T) -> T:
|
| 110 |
+
"""
|
| 111 |
+
Computes torch.where: when condition is true, return input; otherwise return other.
|
| 112 |
+
"""
|
| 113 |
+
...
|
| 114 |
+
|
| 115 |
+
def index_expr(self, expr: sympy.Expr, dtype: torch.dtype) -> T:
|
| 116 |
+
"""
|
| 117 |
+
Converts a sympy expression into a scalar of type dtype. expr is typically
|
| 118 |
+
an indexing expression, thus the name; however, it can also be used in
|
| 119 |
+
non-indexing situations.
|
| 120 |
+
"""
|
| 121 |
+
...
|
| 122 |
+
|
| 123 |
+
def to_dtype(
|
| 124 |
+
self, x: T, dtype: torch.dtype, src_dtype: Optional[torch.dtype] = None
|
| 125 |
+
) -> T:
|
| 126 |
+
"""
|
| 127 |
+
Convert x to dtype. src_dtype can be optionally set to specify what the original
|
| 128 |
+
dtype of x was, which can improve code generation (used by torch to(dtype=dtype)).
|
| 129 |
+
"""
|
| 130 |
+
...
|
| 131 |
+
|
| 132 |
+
def to_dtype_bitcast(self, x: T, dtype: torch.dtype, src_dtype: torch.dtype) -> T:
|
| 133 |
+
"""
|
| 134 |
+
Reinterpret cast x to dtype (reinterpreting the bits in memory as another dtype.)
|
| 135 |
+
src_dtype must be the original type of x.
|
| 136 |
+
"""
|
| 137 |
+
...
|
| 138 |
+
|
| 139 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
| 140 |
+
# These operations are only available in a "kernel" context. Check
|
| 141 |
+
# torch._inductor.codegen.common.CSEProxy for their typical implementation
|
| 142 |
+
# in op handler (routing to their respective implementations in the kernel
|
| 143 |
+
# handler)
|
| 144 |
+
#
|
| 145 |
+
# Importantly, inside a kernel, indexing and mask variables are available
|
| 146 |
+
# in scope, which are typically used by sympy.Expr indexing.
|
| 147 |
+
|
| 148 |
+
def indirect_indexing(
|
| 149 |
+
self, x: T, size: sympy.Expr, check: bool = True
|
| 150 |
+
) -> sympy.Expr:
|
| 151 |
+
"""
|
| 152 |
+
Convert an integral x into a sympy.Expr that can be subsequently used in
|
| 153 |
+
indexing computation. 'size' represents an upper bound on the what valid
|
| 154 |
+
indexes can be; when 'check' is True, we check that the x is in bounds.
|
| 155 |
+
|
| 156 |
+
NB: This is typically mandatory to implement for any analysis, because you
|
| 157 |
+
MUST return a valid sympy.Expr of some sort (even if it's a meaningless symbol).
|
| 158 |
+
"""
|
| 159 |
+
...
|
| 160 |
+
|
| 161 |
+
def load(self, name: str, index: sympy.Expr) -> T:
|
| 162 |
+
"""
|
| 163 |
+
Load from the memory location 'name', offset by some indexing expression 'index'.
|
| 164 |
+
"""
|
| 165 |
+
...
|
| 166 |
+
|
| 167 |
+
def store(
|
| 168 |
+
self,
|
| 169 |
+
name: str,
|
| 170 |
+
index: sympy.Expr,
|
| 171 |
+
value: T,
|
| 172 |
+
mode: StoreMode = None,
|
| 173 |
+
) -> None:
|
| 174 |
+
"""
|
| 175 |
+
Store 'value' to the memory location 'name' offset by 'expr'. If
|
| 176 |
+
specified, 'mode' can require the store to be an atomic addition.
|
| 177 |
+
"""
|
| 178 |
+
...
|
| 179 |
+
|
| 180 |
+
# TODO: Better explain how the "collective" semantics of these ops;
|
| 181 |
+
# remember that the input value is a scalar, you can't reduce on it in the
|
| 182 |
+
# traditional sense!
|
| 183 |
+
def reduction(
|
| 184 |
+
self,
|
| 185 |
+
dtype: torch.dtype,
|
| 186 |
+
src_dtype: torch.dtype,
|
| 187 |
+
reduction_type: ReductionType,
|
| 188 |
+
value: T,
|
| 189 |
+
) -> Union[T, Tuple[T, ...]]:
|
| 190 |
+
"""
|
| 191 |
+
Perform a 'reduction_type' reduction on 'value' of dtype 'src_dtype',
|
| 192 |
+
using 'dtype' as the accumulation dtype for the reduction. The result
|
| 193 |
+
is an intermediate computation which should be stored to the final
|
| 194 |
+
location using 'ops.store_reduction'.
|
| 195 |
+
|
| 196 |
+
Valid reduction types are . For Welford reduction types, this
|
| 197 |
+
function returns multiple outputs; consult reduction_num_outputs to
|
| 198 |
+
determine the amount in metaprogramming applications.
|
| 199 |
+
"""
|
| 200 |
+
...
|
| 201 |
+
|
| 202 |
+
# TODO: in practice, this seems to actually return None, but not returning
|
| 203 |
+
# a T makes common __getattr__ idioms not type correctly. Figure out if
|
| 204 |
+
# this should be returning something.
|
| 205 |
+
def store_reduction(self, name: str, index: sympy.Expr, value: T) -> T:
|
| 206 |
+
"""
|
| 207 |
+
Store the fully accumulated result of 'reduction' to the memory
|
| 208 |
+
location 'name' offset by 'expr'.
|
| 209 |
+
"""
|
| 210 |
+
...
|
| 211 |
+
|
| 212 |
+
def scan(
|
| 213 |
+
self, dtype: torch.dtype, combine_fn: Callable[[T, T], T], value: T, init: int
|
| 214 |
+
) -> T:
|
| 215 |
+
"""
|
| 216 |
+
Perform an associative scan on 'value'.
|
| 217 |
+
"""
|
| 218 |
+
# TODO: Improve the description with some pseudocode
|
| 219 |
+
...
|
| 220 |
+
|
| 221 |
+
def bucketize(
|
| 222 |
+
self,
|
| 223 |
+
values: T,
|
| 224 |
+
offsets_name: str,
|
| 225 |
+
offsets_size: sympy.Expr,
|
| 226 |
+
indexing_dtype: torch.dtype,
|
| 227 |
+
right: bool,
|
| 228 |
+
) -> T:
|
| 229 |
+
# See [Note: Inductor bucketize op]
|
| 230 |
+
...
|
| 231 |
+
|
| 232 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
| 233 |
+
# The following ops have semantics that correspond exactly to the torch
|
| 234 |
+
# operation with the same corresponding name.
|
| 235 |
+
|
| 236 |
+
def abs(self, x0: T) -> T:
|
| 237 |
+
...
|
| 238 |
+
|
| 239 |
+
def exp(self, x0: T) -> T:
|
| 240 |
+
...
|
| 241 |
+
|
| 242 |
+
def exp2(self, x0: T) -> T:
|
| 243 |
+
...
|
| 244 |
+
|
| 245 |
+
def expm1(self, x0: T) -> T:
|
| 246 |
+
...
|
| 247 |
+
|
| 248 |
+
def sqrt(self, x0: T) -> T:
|
| 249 |
+
...
|
| 250 |
+
|
| 251 |
+
def relu(self, x0: T) -> T:
|
| 252 |
+
...
|
| 253 |
+
|
| 254 |
+
def minimum(self, x0: T, x1: T) -> T:
|
| 255 |
+
...
|
| 256 |
+
|
| 257 |
+
def maximum(self, x0: T, x1: T) -> T:
|
| 258 |
+
...
|
| 259 |
+
|
| 260 |
+
def cos(self, x0: T) -> T:
|
| 261 |
+
...
|
| 262 |
+
|
| 263 |
+
def sin(self, x0: T) -> T:
|
| 264 |
+
...
|
| 265 |
+
|
| 266 |
+
def lgamma(self, x0: T) -> T:
|
| 267 |
+
...
|
| 268 |
+
|
| 269 |
+
def erf(self, x0: T) -> T:
|
| 270 |
+
...
|
| 271 |
+
|
| 272 |
+
def cosh(self, x0: T) -> T:
|
| 273 |
+
...
|
| 274 |
+
|
| 275 |
+
def sinh(self, x0: T) -> T:
|
| 276 |
+
...
|
| 277 |
+
|
| 278 |
+
def acos(self, x0: T) -> T:
|
| 279 |
+
...
|
| 280 |
+
|
| 281 |
+
def acosh(self, x0: T) -> T:
|
| 282 |
+
...
|
| 283 |
+
|
| 284 |
+
def asin(self, x0: T) -> T:
|
| 285 |
+
...
|
| 286 |
+
|
| 287 |
+
def asinh(self, x0: T) -> T:
|
| 288 |
+
...
|
| 289 |
+
|
| 290 |
+
def atan2(self, x0: T, x1: T) -> T:
|
| 291 |
+
...
|
| 292 |
+
|
| 293 |
+
def atan(self, x0: T) -> T:
|
| 294 |
+
...
|
| 295 |
+
|
| 296 |
+
def atanh(self, x0: T) -> T:
|
| 297 |
+
...
|
| 298 |
+
|
| 299 |
+
def copysign(self, x0: T, x1: T) -> T:
|
| 300 |
+
...
|
| 301 |
+
|
| 302 |
+
def erfc(self, x0: T) -> T:
|
| 303 |
+
...
|
| 304 |
+
|
| 305 |
+
def erfinv(self, x0: T) -> T:
|
| 306 |
+
...
|
| 307 |
+
|
| 308 |
+
def frexp(self, x0: T):
|
| 309 |
+
...
|
| 310 |
+
|
| 311 |
+
def hypot(self, x0: T, x1: T) -> T:
|
| 312 |
+
...
|
| 313 |
+
|
| 314 |
+
def log10(self, x0: T) -> T:
|
| 315 |
+
...
|
| 316 |
+
|
| 317 |
+
def nextafter(self, x0: T, x1: T) -> T:
|
| 318 |
+
...
|
| 319 |
+
|
| 320 |
+
def logical_and(self, x0: T, x1: T) -> T:
|
| 321 |
+
...
|
| 322 |
+
|
| 323 |
+
def logical_not(self, x0: T) -> T:
|
| 324 |
+
...
|
| 325 |
+
|
| 326 |
+
def logical_or(self, x0: T, x1: T) -> T:
|
| 327 |
+
...
|
| 328 |
+
|
| 329 |
+
def logical_xor(self, x0: T, x1: T) -> T:
|
| 330 |
+
...
|
| 331 |
+
|
| 332 |
+
def bitwise_and(self, x0: T, x1: T) -> T:
|
| 333 |
+
...
|
| 334 |
+
|
| 335 |
+
def bitwise_not(self, x0: T) -> T:
|
| 336 |
+
...
|
| 337 |
+
|
| 338 |
+
def bitwise_or(self, x0: T, x1: T) -> T:
|
| 339 |
+
...
|
| 340 |
+
|
| 341 |
+
def bitwise_xor(self, x0: T, x1: T) -> T:
|
| 342 |
+
...
|
| 343 |
+
|
| 344 |
+
def bitwise_left_shift(self, x0: T, x1: T) -> T:
|
| 345 |
+
...
|
| 346 |
+
|
| 347 |
+
def bitwise_right_shift(self, x0: T, x1: T) -> T:
|
| 348 |
+
...
|
| 349 |
+
|
| 350 |
+
def rsqrt(self, x0: T) -> T:
|
| 351 |
+
...
|
| 352 |
+
|
| 353 |
+
def log1p(self, x0: T) -> T:
|
| 354 |
+
...
|
| 355 |
+
|
| 356 |
+
def tan(self, x0: T) -> T:
|
| 357 |
+
...
|
| 358 |
+
|
| 359 |
+
def tanh(self, x0: T) -> T:
|
| 360 |
+
...
|
| 361 |
+
|
| 362 |
+
def sigmoid(self, x0: T) -> T:
|
| 363 |
+
...
|
| 364 |
+
|
| 365 |
+
def signbit(self, x0: T) -> T:
|
| 366 |
+
...
|
| 367 |
+
|
| 368 |
+
def fmod(self, x0: T, x1: T) -> T:
|
| 369 |
+
...
|
| 370 |
+
|
| 371 |
+
def log(self, x0: T) -> T:
|
| 372 |
+
...
|
| 373 |
+
|
| 374 |
+
def isinf(self, x0: T) -> T:
|
| 375 |
+
...
|
| 376 |
+
|
| 377 |
+
def isnan(self, x0: T) -> T:
|
| 378 |
+
...
|
| 379 |
+
|
| 380 |
+
def round(self, x0: T) -> T:
|
| 381 |
+
...
|
| 382 |
+
|
| 383 |
+
def floor(self, x0: T) -> T:
|
| 384 |
+
...
|
| 385 |
+
|
| 386 |
+
def sign(self, x0: T) -> T:
|
| 387 |
+
...
|
| 388 |
+
|
| 389 |
+
def to_int(self, x0: T) -> T:
|
| 390 |
+
...
|
| 391 |
+
|
| 392 |
+
def trunc(self, x0: T) -> T:
|
| 393 |
+
...
|
| 394 |
+
|
| 395 |
+
def truncdiv(self, x0: T, x1: T) -> T:
|
| 396 |
+
...
|
| 397 |
+
|
| 398 |
+
def ceil(self, x0: T) -> T:
|
| 399 |
+
...
|
| 400 |
+
|
| 401 |
+
def neg(self, x0: T) -> T:
|
| 402 |
+
...
|
| 403 |
+
|
| 404 |
+
def reciprocal(self, x0: T) -> T:
|
| 405 |
+
...
|
| 406 |
+
|
| 407 |
+
def eq(self, x0: T, x1: T) -> T:
|
| 408 |
+
...
|
| 409 |
+
|
| 410 |
+
def ne(self, x0: T, x1: T) -> T:
|
| 411 |
+
...
|
| 412 |
+
|
| 413 |
+
def lt(self, x0: T, x1: T) -> T:
|
| 414 |
+
...
|
| 415 |
+
|
| 416 |
+
def gt(self, x0: T, x1: T) -> T:
|
| 417 |
+
...
|
| 418 |
+
|
| 419 |
+
def le(self, x0: T, x1: T) -> T:
|
| 420 |
+
...
|
| 421 |
+
|
| 422 |
+
def ge(self, x0: T, x1: T) -> T:
|
| 423 |
+
...
|
| 424 |
+
|
| 425 |
+
def add(self, x0: T, x1: T) -> T:
|
| 426 |
+
...
|
| 427 |
+
|
| 428 |
+
def sub(self, x0: T, x1: T) -> T:
|
| 429 |
+
...
|
| 430 |
+
|
| 431 |
+
def mul(self, x0: T, x1: T) -> T:
|
| 432 |
+
...
|
| 433 |
+
|
| 434 |
+
def floordiv(self, x0: T, x1: T) -> T:
|
| 435 |
+
...
|
| 436 |
+
|
| 437 |
+
def truediv(self, x0: T, x1: T) -> T:
|
| 438 |
+
...
|
| 439 |
+
|
| 440 |
+
def div(self, x0: T, x1: T) -> T:
|
| 441 |
+
...
|
| 442 |
+
|
| 443 |
+
def mod(self, x0: T, x1: T) -> T:
|
| 444 |
+
...
|
| 445 |
+
|
| 446 |
+
def pow(self, x0: T, x1: T) -> T:
|
| 447 |
+
...
|
| 448 |
+
|
| 449 |
+
def and_(self, x0: T, x1: T) -> T:
|
| 450 |
+
...
|
| 451 |
+
|
| 452 |
+
def or_(self, x0: T, x1: T) -> T:
|
| 453 |
+
...
|
| 454 |
+
|
| 455 |
+
def xor(self, x0: T, x1: T) -> T:
|
| 456 |
+
...
|
| 457 |
+
|
| 458 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
| 459 |
+
# In CUDA, optimized implementations of other mathematical operations are
|
| 460 |
+
# offered separately via libdevice for double precision computation (in
|
| 461 |
+
# Triton, these go to tl.math rather than tl). We lower to these
|
| 462 |
+
# operators when doing FP64 on CUDA. Note that some operators
|
| 463 |
+
# unconditional go to tl.math.
|
| 464 |
+
#
|
| 465 |
+
# TODO(ezyang): Is this really the best way to do this? What if we have
|
| 466 |
+
# abs internally route to tl.math automatically when given a double
|
| 467 |
+
# precision input? One reason is that when doing codegen, we often don't
|
| 468 |
+
# know what the dtype of the inputs are! (In principle we do know, but
|
| 469 |
+
# for many analyses it's not conveniently available.)
|
| 470 |
+
|
| 471 |
+
def libdevice_abs(self, x0: T) -> T:
|
| 472 |
+
...
|
| 473 |
+
|
| 474 |
+
def libdevice_exp(self, x0: T) -> T:
|
| 475 |
+
...
|
| 476 |
+
|
| 477 |
+
def libdevice_sqrt(self, x0: T) -> T:
|
| 478 |
+
...
|
| 479 |
+
|
| 480 |
+
def libdevice_cos(self, x0: T) -> T:
|
| 481 |
+
...
|
| 482 |
+
|
| 483 |
+
def libdevice_sin(self, x0: T) -> T:
|
| 484 |
+
...
|
| 485 |
+
|
| 486 |
+
def libdevice_sigmoid(self, x0: T) -> T:
|
| 487 |
+
...
|
| 488 |
+
|
| 489 |
+
def libdevice_log(self, x0: T) -> T:
|
| 490 |
+
...
|
| 491 |
+
|
| 492 |
+
|
| 493 |
+
class MockHandler:
|
| 494 |
+
def __getattr__(self, name):
|
| 495 |
+
if name == "name":
|
| 496 |
+
return "MockHandler"
|
| 497 |
+
|
| 498 |
+
def inner(*args, **kwargs):
|
| 499 |
+
fargs = [_arg_str(a) for a in args]
|
| 500 |
+
fargs.extend(f"{k}={v}" for k, v in kwargs.items())
|
| 501 |
+
return f"ops.{name}({', '.join(fargs)})"
|
| 502 |
+
|
| 503 |
+
return inner
|
| 504 |
+
|
| 505 |
+
@staticmethod
|
| 506 |
+
def masked(mask, body, other) -> str:
|
| 507 |
+
return f"ops.masked({mask}, {body()}, {other})"
|
| 508 |
+
|
| 509 |
+
@staticmethod
|
| 510 |
+
def frexp(x):
|
| 511 |
+
return (f"ops.frexp({x})[0]", f"ops.frexp({x})[1]")
|
| 512 |
+
|
| 513 |
+
@staticmethod
|
| 514 |
+
def indirect_indexing(index_var, size, check=True) -> sympy.Symbol:
|
| 515 |
+
return sympy_index_symbol(f"({str(index_var)})")
|
| 516 |
+
|
| 517 |
+
@classmethod
|
| 518 |
+
def _init_cls(cls):
|
| 519 |
+
def make_handler(format_string):
|
| 520 |
+
@staticmethod # type: ignore[misc]
|
| 521 |
+
def inner(*args):
|
| 522 |
+
return format_string.format(*args)
|
| 523 |
+
|
| 524 |
+
return inner
|
| 525 |
+
|
| 526 |
+
for name, format_string in itertools.chain(
|
| 527 |
+
magic_methods.items(), inplace_methods.items()
|
| 528 |
+
):
|
| 529 |
+
setattr(cls, name, make_handler(format_string))
|
| 530 |
+
|
| 531 |
+
|
| 532 |
+
MockHandler._init_cls()
|
| 533 |
+
|
| 534 |
+
|
| 535 |
+
# Use mypy to check protocol implemented correctly
|
| 536 |
+
def _typecheck_MockHandler(h: MockHandler) -> OpsHandler[str]:
|
| 537 |
+
return h
|
| 538 |
+
|
| 539 |
+
|
| 540 |
+
class KernelFormatterHandler:
|
| 541 |
+
def __init__(self, parent_handler):
|
| 542 |
+
self.parent_handler = parent_handler
|
| 543 |
+
self.output = IndentedBuffer(1)
|
| 544 |
+
self.var_counter = itertools.count()
|
| 545 |
+
|
| 546 |
+
@staticmethod
|
| 547 |
+
def ir_to_string(ir_fn, index, rindex=None) -> str:
|
| 548 |
+
from .ir import FlexibleLayout
|
| 549 |
+
from .virtualized import V
|
| 550 |
+
|
| 551 |
+
args = [index, rindex] if rindex is not None else [index]
|
| 552 |
+
names = ["index", "rindex"] if rindex is not None else ["index"]
|
| 553 |
+
formatter = KernelFormatterHandler(MockHandler())
|
| 554 |
+
|
| 555 |
+
with formatter.output.indent(-1):
|
| 556 |
+
formatter.output.writeline(f"def inner_fn({', '.join(names)}):")
|
| 557 |
+
for name, arg in zip(names, args):
|
| 558 |
+
if arg:
|
| 559 |
+
lhs = ", ".join(
|
| 560 |
+
[
|
| 561 |
+
str("_" if isinstance(v, (int, sympy.Integer)) else v)
|
| 562 |
+
for v in arg
|
| 563 |
+
]
|
| 564 |
+
)
|
| 565 |
+
formatter.output.writeline(f"{lhs} = {name}")
|
| 566 |
+
|
| 567 |
+
with V.set_ops_handler(formatter), patch.object(
|
| 568 |
+
FlexibleLayout, "allow_indexing", True
|
| 569 |
+
):
|
| 570 |
+
result = ir_fn(*args)
|
| 571 |
+
return formatter.getvalue(result)
|
| 572 |
+
|
| 573 |
+
def __getattr__(self, name) -> Callable[..., Any]:
|
| 574 |
+
def inner(*args, **kwargs):
|
| 575 |
+
line = getattr(self.parent_handler, name)(*args, **kwargs)
|
| 576 |
+
if name == "indirect_indexing":
|
| 577 |
+
return line
|
| 578 |
+
|
| 579 |
+
def write(line):
|
| 580 |
+
# replace line with a new variable name
|
| 581 |
+
varname = f"tmp{next(self.var_counter)}"
|
| 582 |
+
self.output.writeline(f"{varname} = {line}")
|
| 583 |
+
return varname
|
| 584 |
+
|
| 585 |
+
return pytree.tree_map(write, line)
|
| 586 |
+
|
| 587 |
+
return inner
|
| 588 |
+
|
| 589 |
+
def reduction(
|
| 590 |
+
self,
|
| 591 |
+
dtype: torch.dtype,
|
| 592 |
+
src_dtype: torch.dtype,
|
| 593 |
+
reduction_type: ReductionType,
|
| 594 |
+
value: Union[str, Tuple[str, ...]],
|
| 595 |
+
) -> Union[str, Tuple[str, ...]]:
|
| 596 |
+
line = self.parent_handler.reduction(dtype, src_dtype, reduction_type, value)
|
| 597 |
+
num_values = reduction_num_outputs(reduction_type)
|
| 598 |
+
varnames = [f"tmp{next(self.var_counter)}" for _ in range(num_values)]
|
| 599 |
+
self.output.writeline(f"{','.join(varnames)} = {line}")
|
| 600 |
+
return tuple(varnames) if num_values > 1 else varnames[0]
|
| 601 |
+
|
| 602 |
+
def getvalue(self, result):
|
| 603 |
+
self.output.writeline(f"return {result}")
|
| 604 |
+
return self.output.getvalue()
|
| 605 |
+
|
| 606 |
+
|
| 607 |
+
# Use mypy to check protocol implemented correctly
|
| 608 |
+
def _typecheck_KernelFormatterHandler(h: KernelFormatterHandler) -> OpsHandler[str]:
|
| 609 |
+
return h
|
| 610 |
+
|
| 611 |
+
|
| 612 |
+
class WrapperHandler(Generic[T]):
|
| 613 |
+
def __init__(self, inner: OpsHandler[T]):
|
| 614 |
+
self._inner = inner
|
| 615 |
+
|
| 616 |
+
def __getattr__(self, item):
|
| 617 |
+
return getattr(self._inner, item)
|
| 618 |
+
|
| 619 |
+
|
| 620 |
+
# Use mypy to check protocol implemented correctly
|
| 621 |
+
def _typecheck_WrapperHandler(h: WrapperHandler[T]) -> OpsHandler[T]:
|
| 622 |
+
return h
|
| 623 |
+
|
| 624 |
+
|
| 625 |
+
class OpCounterCSE:
|
| 626 |
+
"""Shim to count how many ops are used"""
|
| 627 |
+
|
| 628 |
+
def __init__(self, inner):
|
| 629 |
+
super().__init__()
|
| 630 |
+
self.parent_handler = inner
|
| 631 |
+
self.op_count = 0
|
| 632 |
+
self.var_names = {}
|
| 633 |
+
|
| 634 |
+
def __getattr__(self, name):
|
| 635 |
+
def inner(*args, **kwargs):
|
| 636 |
+
val = getattr(self.parent_handler, name)(*args, **kwargs)
|
| 637 |
+
if name == "indirect_indexing":
|
| 638 |
+
return val
|
| 639 |
+
|
| 640 |
+
def count(val):
|
| 641 |
+
if val not in self.var_names:
|
| 642 |
+
varname = f"tmp{self.op_count}"
|
| 643 |
+
self.op_count += 1
|
| 644 |
+
self.var_names[val] = varname
|
| 645 |
+
return varname
|
| 646 |
+
else:
|
| 647 |
+
return self.var_names[val]
|
| 648 |
+
|
| 649 |
+
return pytree.tree_map(count, val)
|
| 650 |
+
|
| 651 |
+
return inner
|
| 652 |
+
|
| 653 |
+
|
| 654 |
+
def _typecheck_OpCounterCSE(h: OpCounterCSE) -> OpsHandler[str]:
|
| 655 |
+
return h
|
vila/lib/python3.10/site-packages/torch/_inductor/optimize_indexing.py
ADDED
|
@@ -0,0 +1,118 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import math
|
| 2 |
+
|
| 3 |
+
import sympy
|
| 4 |
+
|
| 5 |
+
import torch
|
| 6 |
+
from torch.utils._sympy.value_ranges import ValueRanges
|
| 7 |
+
from .ir import LoopBody
|
| 8 |
+
from .utils import dominated_nodes
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def val_expressable_in_32_bits(val):
|
| 12 |
+
if getattr(val, "is_Boolean", False):
|
| 13 |
+
return True
|
| 14 |
+
|
| 15 |
+
if isinstance(val, sympy.Expr):
|
| 16 |
+
assert val.is_number
|
| 17 |
+
if val.is_Integer or val.is_Boolean:
|
| 18 |
+
val = int(val)
|
| 19 |
+
else:
|
| 20 |
+
val = float(val)
|
| 21 |
+
|
| 22 |
+
# bound within mantissa
|
| 23 |
+
if isinstance(val, float):
|
| 24 |
+
return val <= (2**24) and val >= -(2**24)
|
| 25 |
+
|
| 26 |
+
if isinstance(val, int):
|
| 27 |
+
iinfo = torch.iinfo(torch.int32)
|
| 28 |
+
return val <= iinfo.max and val >= iinfo.min
|
| 29 |
+
|
| 30 |
+
raise Exception(f"Unexpected value {val}")
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def range_expressable_in_32_bits(range):
|
| 34 |
+
return val_expressable_in_32_bits(range.lower) and val_expressable_in_32_bits(
|
| 35 |
+
range.upper
|
| 36 |
+
)
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def try_to_reduce_precision(node, bounds, indirect_vars, indices, replacement_vals):
|
| 40 |
+
# if a downstream use of a node explicitly converts to int32, or float16/float32/float64,
|
| 41 |
+
# then it's precision is set for that chain of uses, and we don't need to consider those
|
| 42 |
+
# dominated values
|
| 43 |
+
def skip_filter(node):
|
| 44 |
+
return node.target == "to_dtype" and node.args[2] in (
|
| 45 |
+
torch.int32,
|
| 46 |
+
torch.float32,
|
| 47 |
+
torch.float64,
|
| 48 |
+
)
|
| 49 |
+
|
| 50 |
+
# TODO - there are dominated uses whose dtype does not depend on whether
|
| 51 |
+
# we reduce the precision here, e.g. add(int64, int64) one of the args can be reduced to
|
| 52 |
+
# int32 without changing the output precision of the node. this case hasn't shown up
|
| 53 |
+
for dominated in dominated_nodes([node], skip_filter):
|
| 54 |
+
if dominated.target in ["store", "output"]:
|
| 55 |
+
continue
|
| 56 |
+
|
| 57 |
+
if isinstance(dominated.target, str) and "set_indirect" in dominated.target:
|
| 58 |
+
idx = int(dominated.target[len("set_indirect") :])
|
| 59 |
+
indirect_var = indirect_vars[idx]
|
| 60 |
+
|
| 61 |
+
# We check that we can compute all the indices it's involved in with int32
|
| 62 |
+
for index, expr in indices.items():
|
| 63 |
+
if indirect_var in expr.free_symbols:
|
| 64 |
+
index_val = replacement_vals[index]
|
| 65 |
+
|
| 66 |
+
if math.isinf(index_val.lower) or math.isinf(index_val.upper):
|
| 67 |
+
return
|
| 68 |
+
|
| 69 |
+
# all indices are integers, so make sure that we
|
| 70 |
+
# use the bounds of integers instead of floats.
|
| 71 |
+
# TODO - not sure if we should be doing int/float casts while tracing,
|
| 72 |
+
# might interfere with sympy.
|
| 73 |
+
|
| 74 |
+
index_val_int = ValueRanges[sympy.Expr](
|
| 75 |
+
int(index_val.lower), int(index_val.upper)
|
| 76 |
+
)
|
| 77 |
+
if not range_expressable_in_32_bits(index_val_int):
|
| 78 |
+
return
|
| 79 |
+
|
| 80 |
+
if not range_expressable_in_32_bits(bounds[dominated]):
|
| 81 |
+
return
|
| 82 |
+
|
| 83 |
+
args = list(node.args)
|
| 84 |
+
args[2] = torch.int32
|
| 85 |
+
node.args = tuple(args)
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
def indexing_dtype_strength_reduction(loop_body: LoopBody):
|
| 89 |
+
"""
|
| 90 |
+
Performs Value Range Analysis on LoopBody's fx graph to reduce precision of
|
| 91 |
+
intermediaries from int64 to int32
|
| 92 |
+
"""
|
| 93 |
+
bv = loop_body.bounds()
|
| 94 |
+
|
| 95 |
+
int64_dtype_nodes = [
|
| 96 |
+
node
|
| 97 |
+
for node in loop_body.get_nodes()
|
| 98 |
+
if (
|
| 99 |
+
node.target == "to_dtype"
|
| 100 |
+
and node.args[2] == torch.int64
|
| 101 |
+
and node not in bv.unbounded_vars
|
| 102 |
+
)
|
| 103 |
+
]
|
| 104 |
+
if not int64_dtype_nodes:
|
| 105 |
+
return
|
| 106 |
+
|
| 107 |
+
bounds = bv.get_bounds()
|
| 108 |
+
|
| 109 |
+
# TODO - if dominated node of one to_dtype is not expressible in int32,
|
| 110 |
+
# we should short circuit another to_dtype node if that node also dominates
|
| 111 |
+
for node in int64_dtype_nodes:
|
| 112 |
+
try_to_reduce_precision(
|
| 113 |
+
node,
|
| 114 |
+
bounds,
|
| 115 |
+
loop_body.indirect_vars,
|
| 116 |
+
loop_body.indexing_exprs,
|
| 117 |
+
bv.replacement_vals,
|
| 118 |
+
)
|
vila/lib/python3.10/site-packages/torch/_inductor/quantized_lowerings.py
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
def register_quantized_ops():
|
| 5 |
+
from . import lowering
|
| 6 |
+
|
| 7 |
+
quantized = torch.ops.quantized
|
| 8 |
+
|
| 9 |
+
lowering.add_needs_realized_inputs(
|
| 10 |
+
[
|
| 11 |
+
quantized.max_pool2d,
|
| 12 |
+
]
|
| 13 |
+
)
|
| 14 |
+
|
| 15 |
+
lowering.make_fallback(quantized.max_pool2d)
|
vila/lib/python3.10/site-packages/torch/_inductor/scheduler.py
ADDED
|
@@ -0,0 +1,2445 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import collections
|
| 2 |
+
import dataclasses
|
| 3 |
+
import functools
|
| 4 |
+
import itertools
|
| 5 |
+
import logging
|
| 6 |
+
import math
|
| 7 |
+
import operator
|
| 8 |
+
import os
|
| 9 |
+
import pprint
|
| 10 |
+
import textwrap
|
| 11 |
+
from typing import (
|
| 12 |
+
Any,
|
| 13 |
+
Counter,
|
| 14 |
+
DefaultDict,
|
| 15 |
+
Dict,
|
| 16 |
+
Generic,
|
| 17 |
+
List,
|
| 18 |
+
Optional,
|
| 19 |
+
Sequence,
|
| 20 |
+
Set,
|
| 21 |
+
Tuple,
|
| 22 |
+
TypeVar,
|
| 23 |
+
Union,
|
| 24 |
+
)
|
| 25 |
+
|
| 26 |
+
import sympy
|
| 27 |
+
|
| 28 |
+
import torch
|
| 29 |
+
from torch._dynamo.utils import dynamo_timed
|
| 30 |
+
from torch._inductor.metrics import get_metric_table, is_metric_table_enabled
|
| 31 |
+
from torch.utils._triton import has_triton
|
| 32 |
+
|
| 33 |
+
from . import comms, config, dependencies, ir, metrics
|
| 34 |
+
from .codegen.common import get_scheduling_for_device, Kernel
|
| 35 |
+
from .comm_analysis import estimate_nccl_collective_runtime
|
| 36 |
+
from .dependencies import Dep, MemoryDep, StarDep, WeakDep
|
| 37 |
+
from .ir import ComputedBuffer, MultiOutput, MultiOutputLayout
|
| 38 |
+
from .sizevars import SimplifyIndexing
|
| 39 |
+
from .utils import (
|
| 40 |
+
cache_on_self,
|
| 41 |
+
cmp,
|
| 42 |
+
free_symbol_has,
|
| 43 |
+
get_device_tflops,
|
| 44 |
+
get_dtype_size,
|
| 45 |
+
get_gpu_dram_gbps,
|
| 46 |
+
green_text,
|
| 47 |
+
is_collective,
|
| 48 |
+
is_wait,
|
| 49 |
+
red_text,
|
| 50 |
+
sympy_product,
|
| 51 |
+
)
|
| 52 |
+
from .virtualized import V
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
# Module-level loggers: `log` for general scheduler diagnostics, `fusion_log`
# for the dedicated "fusion" artifact channel (enabled via TORCH_LOGS).
log = logging.getLogger(__name__)
fusion_log = torch._logging.getArtifactLogger(__name__, "fusion")
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
class WhyNoFuse:
    """Records why a pair of scheduler nodes could not be fused and logs it
    to the fusion artifact logger when invoked."""

    # TODO when we drop support for Python < 3.10, we can use
    # @dataclass(slots=True) instead of manually specifying __slots__.
    __slots__ = ["node1", "node2", "reason", "args"]
    reason: str
    args: Tuple[Any, ...]

    def __init__(self, node1: "BaseSchedulerNode", node2: "BaseSchedulerNode"):
        self.node1 = node1
        self.node2 = node2

    def __call__(self, reason, *args):
        # Stash the failure reason (a %-format string plus its args) and emit
        # a debug record on the fusion channel.
        self.reason = reason
        self.args = args
        fusion_log.debug(self)

    def __str__(self):
        detail = self.reason % self.args
        return f"cannot fuse {self.node1.get_name()} with {self.node2.get_name()}: {detail}"
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
def pformat(obj):
    """Pretty-print *obj*; a multi-line result is pushed onto fresh,
    indented lines so it reads cleanly inside a debug dump."""
    if isinstance(obj, set):
        # pformat has trouble with sets of sympy exprs
        obj = sorted(obj, key=str)
    text = pprint.pformat(obj, indent=4)
    if "\n" not in text:
        return text
    return f"\n{textwrap.indent(text, ' '*4)}"
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
class OutputNode:
    """Sentinel scheduler node representing a graph output that consumes a
    single dependency; used as the terminal "user" of a buffer."""

    def __init__(self, dep):
        self.unmet_dependencies = {dep}
        self.inverse_users = []

    def is_reduction(self):
        return False

    def get_alias_names(self):
        return ()

    def get_name(self):
        return "OUTPUT"

    __repr__ = get_name
|
| 106 |
+
|
| 107 |
+
|
| 108 |
+
def _prune_redundant_deps(node, name_to_fused_node):
    """
    Prunes weakdeps intended for mutation ordering
    on an upstream fused node if after fusion there is another dependency
    on the fused upstream node, making the weakdep redundant

    In essence this enforces an ordering on fusions. As fusions occur, weakdeps will
    be incrementally removed, enabling other fusions, ensuring they are fused in order.
    """
    # Count strong (non-weak) unmet deps per fused upstream node name.
    strong_dep_counts: Counter[str] = collections.Counter(
        name_to_fused_node[dep.name].get_name()
        for dep in node.unmet_dependencies
        if not isinstance(dep, WeakDep)
    )

    def _prunable(dep):
        if not isinstance(dep, WeakDep):
            return False
        fused = name_to_fused_node[dep.name]
        # Redundant: a strong dep on the same fused node already orders us.
        # Self-dep: fused nodes gather deps from all their snodes, so if B has
        # a weakdep on A and B fuses with C, fusing BC can resurface the dep
        # pointing back at the merged node itself.
        return strong_dep_counts[fused.get_name()] > 0 or fused == node

    pruned = {dep for dep in node.unmet_dependencies if _prunable(dep)}
    if pruned:
        node.unmet_dependencies = node.unmet_dependencies - pruned
        node.set_read_writes(node.read_writes.remove_reads(pruned))
|
| 141 |
+
|
| 142 |
+
|
| 143 |
+
# TODO(xmfan): reuse an existing mapping for this if it exists, or formalize this into ir.py:ExternKernel
# Maps an extern kernel's codegen name to the aten op it wraps; used by
# get_estimated_runtime() to dry-run the op under FakeTensorMode and count FLOPs.
kernel_name_to_op = {
    "extern_kernels.convolution": torch.ops.aten.convolution,
    "extern_kernels.mm": torch.ops.aten.mm,
    "extern_kernels.bmm": torch.ops.aten.bmm,
    "extern_kernels.addmm": torch.ops.aten.addmm,
}
|
| 150 |
+
|
| 151 |
+
|
| 152 |
+
class BaseSchedulerNode:
    """Base wrapper around an ir.Buffer used by the Scheduler.

    Tracks users, read/write dependencies, ordering metadata, and implements
    codegen-time decisions (allocation, inplace reuse) plus cost estimation.
    """

    def __init__(self, scheduler: "Scheduler", node: ir.Buffer):
        self.scheduler: Scheduler = scheduler
        self.node: ir.Buffer = node
        self.users: List[NodeUser] = []
        self.inverse_users: List[BaseSchedulerNode] = []
        self.node_users: List[BaseSchedulerNode] = []
        self.set_read_writes(node.get_read_writes())
        self.ancestors: Set[str] = set()
        # min_order/max_order are declared here but assigned later by the scheduler.
        self.min_order: int
        self.max_order: int
        self.last_usage: Set[
            str
        ] = set()  # buffers that won't be used after this kernel
        # Whether origin-comment info has already been emitted (see codegen_originating_info).
        self.written = False

    def __repr__(self):
        return f"{type(self).__name__}(name={self.get_name()!r})"

    def debug_str(self) -> str:
        """Longer form printout for trace logs"""
        name = self.get_name()
        lines = [
            f"{name}: {type(self).__name__}({type(getattr(self, 'node', None)).__name__})",
            f"{name}.writes = {pformat(self.read_writes.writes)}",
            f"{name}.unmet_dependencies = {pformat(self.unmet_dependencies)}",
            f"{name}.met_dependencies = {pformat(self.read_writes.reads - self.unmet_dependencies)}",
            f"{name}.users = {self.users}",
        ]
        try:
            lines += [
                self.debug_str_extra(),
            ]
        except Exception:
            # debug printing must never break compilation
            log.warning("Ignoring error in debug_str()", exc_info=True)

        return "\n".join(lines).rstrip()

    def debug_str_extra(self) -> str:
        # Subclasses append type-specific details here.
        return ""

    def log_details(self):
        log.info(
            "%s: unmet_dependencies = %s, writes = %s",
            self,
            self.unmet_dependencies,
            self.read_writes.writes,
        )

    def update_mutated_names(self, renames: Dict[str, str]):
        self.set_read_writes(self.read_writes.rename(renames))

    def add_mutation_dep(self, dep):
        self.set_read_writes(self.read_writes.with_read(dep))

    def add_fake_dep(self, dep):
        self.set_read_writes(self.read_writes.with_read(dep))

    def set_users(self, users: List["NodeUser"]):
        # deduplicate: merge multiple uses by the same node (keyed by identity)
        result: Dict[int, NodeUser] = {}
        for use in users:
            if id(use.node) in result:
                result[id(use.node)] = use.merge(result[id(use.node)])
            else:
                result[id(use.node)] = use
        self.users = list(result.values())

    def set_last_usage(
        self, future_used_buffers: Set[str], mutation_real_name: Dict[str, str]
    ):
        # Buffers this node touches that no later node touches can be freed
        # after this kernel runs.
        used_buffers = self.used_or_aliased_buffer_names()
        used_buffers = {mutation_real_name.get(k, k) for k in used_buffers}
        self.last_usage = used_buffers - future_used_buffers

    def get_aliases(self):
        return self.node.get_alias_names()

    def get_mutations(self):
        return self.node.get_mutation_names()

    def has_aliasing_or_mutation(self):
        return bool(self.get_aliases() or self.get_mutations())

    def set_read_writes(self, rw: dependencies.ReadWrites):
        self.read_writes: dependencies.ReadWrites = rw
        self.unmet_dependencies = self.read_writes.reads
        self.prune_deps()

    def op_counts(self):
        return self.read_writes.op_counts

    def used_buffer_names(self) -> Set[str]:
        return {
            dep.name
            for dep in itertools.chain(self.read_writes.reads, self.read_writes.writes)
        }

    def used_or_aliased_buffer_names(self) -> Set[str]:
        used_names = set()

        for dep in itertools.chain(self.read_writes.reads, self.read_writes.writes):
            used_names.add(dep.name)
            if V.graph.name_to_buffer.get(dep.name):
                layout = V.graph.name_to_buffer[dep.name].get_layout()
                # needed to avoid deallocating aliased buffer
                # if there are still uses of aliases ahead
                if isinstance(layout, ir.AliasedLayout):
                    used_names.add(layout.view.data.get_name())
        return used_names

    def prune_deps(self):
        # Drop deps on buffers the scheduler has already made available.
        self.unmet_dependencies = {
            dep
            for dep in self.unmet_dependencies
            if dep.name not in self.scheduler.available_buffer_names
        }

    def prune_weak_deps(self):
        # Prune weak dependencies on buffers that have been removed
        def should_prune(dep):
            return isinstance(dep, WeakDep) and dep.name in V.graph.removed_buffers

        to_remove = {dep for dep in self.read_writes.reads if should_prune(dep)}
        self.set_read_writes(self.read_writes.remove_reads(to_remove))

    def prune_redundant_deps(self, name_to_fused_node):
        _prune_redundant_deps(self, name_to_fused_node)

    def get_name(self) -> str:
        return self.node.get_name()

    def get_first_name(self) -> str:
        return self.get_name()

    def get_names(self) -> Set[str]:
        return {self.get_name()}

    def get_nodes(self) -> Sequence["BaseSchedulerNode"]:
        return [self]

    def get_device(self):
        return self.node.get_device()

    # The following predicates default to False; subclasses override them.
    def is_reduction(self):
        return False

    def is_split_scan(self):
        return False

    def is_template(self):
        return False

    def is_extern(self):
        return False

    def is_foreach(self):
        return False

    def can_inplace(self, read_dep: dependencies.MemoryDep):
        return False

    def has_side_effects(self):
        return False

    def decide_inplace_update(self):
        """
        Decide if there should be inplace updates for the node
        and record the decision in the active kernel.
        """
        if not self.node.should_allocate():
            return

        # Aliasing/mutating nodes cannot safely reuse their input storage here.
        if isinstance(self, (SchedulerNode,)) and (
            self.node.get_alias_names() or self.node.get_mutation_names()
        ):
            return

        if (
            (
                isinstance(self, (SchedulerNode,))
                # o what have i done. lets make this an api
                or (
                    isinstance(self, ExternKernelSchedulerNode)
                    and isinstance(self.node, (ir.AllReduce, ir.InPlaceHint))
                )
            )
            and config.inplace_buffers
            and (
                not isinstance(V.kernel, torch._inductor.codegen.triton.TritonKernel)
                or getattr(V.kernel, "mutations", None) is not None
            )
        ):
            from .codegen.wrapper import buffer_reuse_key

            # Sort for deterministic reuse decisions across runs.
            ordered_reads = sorted(self.read_writes.reads, key=lambda x: x.name)

            for read in ordered_reads:
                input_node: Optional[
                    BaseSchedulerNode
                ] = self.scheduler.name_to_node.get(read.name)
                if input_node and V.graph.wrapper_code.can_reuse(input_node, self):
                    assert input_node.users is not None
                    remaining_uses = [
                        x
                        for x in input_node.users
                        if x.node.get_name()
                        not in self.scheduler.available_buffer_names
                    ]
                    # Only reuse when this node is the sole remaining consumer,
                    # the input's layout permits it, and both buffers would be
                    # allocated identically (same reuse key).
                    if (
                        len(remaining_uses) == 1
                        and remaining_uses[0].can_inplace
                        and remaining_uses[0].node is self
                        and not isinstance(
                            input_node.node.get_layout(),
                            (
                                ir.MultiOutputLayout,
                                ir.MutationLayout,
                                ir.AliasedLayout,
                            ),
                        )
                        and not (
                            isinstance(
                                input_node.node, (ir.FallbackKernel, ir.MultiOutput)
                            )
                            and len(input_node.node.get_alias_names()) > 0
                        )
                        and buffer_reuse_key(input_node.node)
                        == buffer_reuse_key(self.node)
                    ):
                        # hacky check for if V.kernel is a real kernel or NullHandler
                        if hasattr(V.kernel, "args"):
                            # if there isn't a triton kernel, then we don't need to call triton-specific things.
                            # but TODO this might be a convenient place to signal to the Collective kernels to inplace
                            # (and, can we make "kernel" less generic of a name?)
                            V.kernel.args.make_inplace(
                                input_node.get_name(), self.get_name()
                            )
                            # mutations not tracked in cpp kernels
                            if isinstance(
                                V.kernel, torch._inductor.codegen.triton.TritonKernel
                            ):
                                V.kernel.mutations.add(input_node.get_name())
                                V.kernel.mutations.add(self.get_name())

                            # update last usage of reused node
                            self.last_usage.discard(input_node.get_name())

                            V.kernel.inplace_update_buffers[
                                self.get_name()
                            ] = input_node.get_name()
                        break

    def allocate(self):
        """Emit allocation (or inplace-reuse) code for this node's buffer."""
        if not self.node.should_allocate():
            return

        if isinstance(self, (SchedulerNode,)) and (
            self.node.get_alias_names() or self.node.get_mutation_names()
        ):
            V.graph.wrapper_code.codegen_allocation(self.node)
            return

        # hacky check for if V.kernel is a real kernel or NullHandler
        if (
            hasattr(V.kernel, "args")
            and self.get_name() in V.kernel.inplace_update_buffers
        ):
            # decide_inplace_update() chose an input buffer to reuse.
            V.graph.wrapper_code.codegen_inplace_reuse(
                self.scheduler.name_to_node[
                    V.kernel.inplace_update_buffers[self.get_name()]
                ].node,
                self.node,
            )
        else:
            V.graph.wrapper_code.codegen_allocation(self.node)

    def can_free(self):
        # There's no real allocated buffer, no need to free it
        if isinstance(self.node.layout, ir.NoneLayout):
            return False
        for use in self.users:
            # Graph outputs must stay alive past the last kernel.
            if isinstance(use.node, OutputNode):
                return False
        return True

    def codegen_originating_info(self, buffer, only_once=True):
        """Emit `#pragma CMT` comments describing the FX origins of this node."""
        if not config.comment_origin:
            return

        if only_once and self.written:
            return
        origins = self.node.origins
        out_lines = []

        for o in origins:
            if o.op == "output":
                # These are boring and samey
                continue

            out_lines.append("")
            # TODO(voz): Should the pragma be constant somewhere?
            out_lines.append("#pragma CMT ORIGIN:")
            op_info_str = f"#pragma CMT {o.op} {o.target}"
            if "seq_nr" in o.meta:
                op_info_str = op_info_str + f" seq_nr:{o.meta['seq_nr']}"
            out_lines.append(op_info_str)
            if "stack_trace" in o.meta:
                stack_trace = f"{o.meta['stack_trace']}"
                stack_trace_last_line = stack_trace.split("|")[-1]
                # Escape braces/newlines so the comment survives later formatting.
                out_lines.append(
                    "#pragma CMT "
                    + stack_trace_last_line.replace("{", "{{")
                    .replace("}", "}}")
                    .replace("\n", "\\")
                )
                out_lines.append("#pragma CMT END ORIGIN")
                out_lines.append("")

        if len(out_lines) == 0:
            return

        # TODO(voz): Ostensibly, we should not need this. But there are cases where C++ codegen does
        # not use BracesBuffer, so we have no good indicator of a C++ buffer atm.
        buffer.writelines(out_lines)
        self.written = True

    def get_read_write_buffers_sizes(self) -> int:
        """
        Counting the number of bytes accessed for a kernel is
        surprisingly tricky. In particular, there is a differentiation
        between 'theoretical' memory accesses and practical memory
        accesses. For example, a layernorm kernel may actually access an
        input 3 times, but in theory, it only needs to access its input
        once (and may be optimized to do so through say, persistent
        reductions)

        Another example is that even though a buffer is passed in, we may
        not access the entire buffer. This may occur if we are accessing
        a slice of the buffer. Another tricky case is for indirect
        indexing, where the amount of bytes accessed depends on the
        values of the input.

        What this function aims to compute is the memory accesses for
        worst-case inputs, best-case optimization. What this means is
        that for each buffer we compute the amount of potential accesses in two ways and take the minimum.

        1. Numel in ranges multiplied by number of deps the buffer has
        2. The buffer size
        """
        if isinstance(self, NopKernelSchedulerNode):
            return 0
        if isinstance(self, ExternKernelSchedulerNode) and isinstance(
            self.node, MultiOutput
        ):
            return 0

        if isinstance(self, SchedulerNode):
            node_numel = V.graph.sizevars.size_hint(
                sympy_product(self.get_ranges()[0])
                * sympy_product(self.get_ranges()[1])
            )
        else:
            # No iteration ranges available; effectively "unbounded" so the
            # min() below always picks the buffer size.
            node_numel = int(1e9)
        buf_accesses = collections.defaultdict(list)
        for dep in self.read_writes.reads | self.read_writes.writes:
            buf_accesses[dep.name].append(dep)

        reads = {dep.name for dep in self.read_writes.reads}
        writes = {dep.name for dep in self.read_writes.writes}

        def is_materialized(buf, snodes):
            # A write is materialized if any user lives outside this fusion group.
            users = self.scheduler.name_to_node[buf].users
            buf_uses = {user.node for user in users}
            return len(buf_uses - set(snodes)) > 0

        if isinstance(self, FusedSchedulerNode):
            # Intermediates consumed entirely within the fused group never
            # touch DRAM, so exclude them from the byte count.
            removed_buffers = {
                dep for dep in writes if not is_materialized(dep, self.snodes)
            }
            writes = writes - removed_buffers
            reads = reads - removed_buffers
        node_bytes = 0

        for buf_name in reads | writes:
            buf_accessed_elems = sum([node_numel for dep in buf_accesses[buf_name]])
            buf: Union[ir.Buffer, ir.TensorBox]
            if buf_name in V.graph.name_to_buffer:
                buf = V.graph.name_to_buffer[buf_name]
            elif buf_name in V.graph.graph_inputs:
                buf = V.graph.graph_inputs[buf_name]
            else:
                continue

            def get_buf_elems(buf):
                return V.graph.sizevars.size_hint(sympy_product(buf.get_size()))

            # Kind of a lazy way to get the MultiOutput nodes corresponding to
            # a MultiOutputLayout
            if isinstance(buf.layout, MultiOutputLayout):
                users = self.scheduler.name_to_node[buf.get_name()].users
                buf_elems = sum(get_buf_elems(user.node.node) for user in users)
            else:
                buf_elems = get_buf_elems(buf)

            node_bytes += min(buf_elems, buf_accessed_elems) * get_dtype_size(
                buf.get_dtype()
            )

        return node_bytes

    def get_estimated_runtime(self) -> float:
        """
        Returns estimated op runtime in nanoseconds (ns)
        """
        layout = None
        dtype = None
        if not hasattr(self, "node") or not self.node:
            # Fused/foreach nodes have no single `node`; borrow layout/dtype
            # from their first sub-node.
            assert isinstance(
                self, (FusedSchedulerNode, ForeachKernelSchedulerNode)
            ), f"{type(self)=}"
            assert self.snodes
            if not self.snodes[0].node:
                return 0
            layout = self.snodes[0].node.get_layout()
            dtype = self.snodes[0].node.get_dtype()
        else:
            layout = self.node.get_layout()
            dtype = self.node.get_dtype()

        if "cuda" != layout.device.type:
            # default to no reordering based on runtime
            return 0

        # Collective kernels
        if is_collective(self.node):
            return estimate_nccl_collective_runtime(self.node)
        elif is_wait(self.node):
            # ir.Wait is only used for collective ops.
            # The time needed for the collective op is already estimated and considered
            # when we are processing the collective op IR node, so ir.Wait takes 0 time
            # since it doesn't take extra time to get the result after the collective is completed.
            return 0

        try:
            gpu_memory_bandwidth = get_gpu_dram_gbps()
            gpu_flops = get_device_tflops(dtype) * 10**12
        except Exception:
            # Device properties unavailable (e.g. no CUDA context); skip estimation.
            return 0

        if isinstance(self, ExternKernelSchedulerNode):
            assert isinstance(self.node, ir.ExternKernel), f"{type(self.node)=}"
            op = kernel_name_to_op.get(
                getattr(self.node, "python_kernel_name", ""), None
            )

            # if there is a resolved op, dry-run using fake mode and record flop count
            if op is not None:
                from torch._subclasses.fake_tensor import FakeTensorMode
                from torch.utils.flop_counter import FlopCounterMode

                with FakeTensorMode(), FlopCounterMode(
                    display=False
                ) as flop_counter_mode:
                    from .ir import ir_node_to_tensor

                    fake_inputs = [
                        ir_node_to_tensor(input, guard_shape=False)
                        for input in self.node.inputs
                    ]
                    cls = self.node.__class__
                    cls.process_kernel(op, *fake_inputs, **self.node.kwargs)

                # TODO(xmfan): find a better heuristic to model FLOPS/latency relationship
                factor = 1.0
                counted_flops = flop_counter_mode.get_total_flops()
                counted_bytes = self.get_read_write_buffers_sizes()
                compute_time = (factor * counted_flops / gpu_flops) * 1e9
                transfer_time = counted_bytes / gpu_memory_bandwidth

                # Return estimated runtime in nanoseconds
                return max(compute_time, transfer_time)

        elif isinstance(self, FusedSchedulerNode) or isinstance(
            self.node, ComputedBuffer
        ):
            # Return estimated runtime in nanoseconds (bytes / gbps)
            return self.get_read_write_buffers_sizes() / gpu_memory_bandwidth

        return 0
|
| 642 |
+
|
| 643 |
+
|
| 644 |
+
class ExternKernelSchedulerNode(BaseSchedulerNode):
    """Scheduler node for opaque extern kernels (e.g. aten fallbacks) whose
    bodies are not generated by inductor."""

    def debug_str_extra(self) -> str:
        return f"{self.get_name()}.node.kernel = {getattr(self.node, 'python_kernel_name', None)}"

    def is_extern(self):
        return True

    def has_side_effects(self):
        # Delegate to the IR node when it exposes the query.
        return hasattr(self.node, "has_side_effects") and self.node.has_side_effects()

    def can_inplace(self, read_dep: dependencies.MemoryDep):
        """Return True if this node may write its result into `read_dep`'s buffer."""
        if self.get_aliases() or self.is_template():
            return False

        if read_dep.name not in self.scheduler.name_to_node:
            # don't allow reuse of an 'input' buffer, we don't own it
            # (would this have been fixed if I tracked mutations properly above?)
            return False
        if not isinstance(
            self.node, (torch._inductor.ir.AllReduce, torch._inductor.ir.InPlaceHint)
        ):
            # TODO make this a property of the IR
            return False

        if len(self.read_writes.writes) == 1:
            # Safe only when the (single) write covers the same numel as the read.
            write_dep = next(iter(self.read_writes.writes))
            numel_diff = read_dep.get_numel() - write_dep.get_numel()
            return V.graph.sizevars.simplify(numel_diff) == 0

        return False
|
| 674 |
+
|
| 675 |
+
|
| 676 |
+
class NopKernelSchedulerNode(BaseSchedulerNode):
    """Scheduler node for a buffer that requires no kernel; inherits all
    behavior from BaseSchedulerNode unchanged."""
|
| 678 |
+
|
| 679 |
+
|
| 680 |
+
class SchedulerNode(BaseSchedulerNode):
    """Scheduler node backed by a ``ComputedBuffer`` or ``TemplateBuffer``;
    tracks the node's iteration sizes, loop body, and fusion group."""

    def __init__(
        self,
        scheduler: "Scheduler",
        node: Union[ir.ComputedBuffer, ir.TemplateBuffer],
    ):
        super().__init__(scheduler, node)
        self._compute_attrs()

    def _compute_attrs(
        self,
        extra_indexing_constraints: Optional[Tuple[Dict[Any, Any], List[Any]]] = None,
    ):
        """(Re)derive ``_sizes``, ``_body``, the fusion ``group`` and the
        read/write dependencies from the underlying IR node."""
        assert isinstance(self.node, (ir.ComputedBuffer, ir.TemplateBuffer))
        self._sizes, self._body = self.node.simplify_and_reorder(
            extra_indexing_constraints=extra_indexing_constraints
        )

        device = self.node.get_device()
        group_fn = self.scheduler.get_backend(device).group_fn
        self.group = (device, group_fn(self._sizes))

        if isinstance(self.node, ir.TemplateBuffer):
            rw = self.node.normalized_read_writes()
        else:
            rw = dependencies.extract_read_writes(
                self._body, *self._sizes, normalize=True
            )
        self.set_read_writes(rw)

    def recompute_size_and_body(
        self, extra_indexing_constraints: Tuple[Dict[Any, Any], List[Any]]
    ):
        self._compute_attrs(extra_indexing_constraints=extra_indexing_constraints)

    def debug_str_extra(self) -> str:
        name = self.get_name()
        out = [
            f"{name}.group.device = {self.group[0]}",
            f"{name}.group.iteration = {self.group[1]}",
            f"{name}.sizes = {self._sizes}",
        ]
        if self.get_aliases():
            out.append(f"{name}.aliases = {pformat(self.get_aliases())}")
        if self.get_mutations():
            out.append(f"{name}.mutations = {pformat(self.get_mutations())}")
        if isinstance(self._body, ir.LoopBody):
            out.append(f"class {name}_loop_body:")
            out.append(textwrap.indent(self._body.debug_str(), " "))
        return "\n".join(out)

    def get_ranges(self):
        return self._sizes

    def is_reduction(self):
        assert isinstance(
            self.node, (ir.ComputedBuffer, ir.TemplateBuffer)
        ), f"{type(self.node)=}"
        return bool(self.node.get_reduction_type())

    def is_split_scan(self):
        assert isinstance(
            self.node, (ir.ComputedBuffer, ir.TemplateBuffer)
        ), f"{type(self.node)=}"
        return isinstance(self.node, ir.ComputedBuffer) and isinstance(
            self.node.data, ir.SplitScan
        )

    def is_template(self):
        return isinstance(self.node, ir.TemplateBuffer)

    def get_template_node(self):
        return self.node if self.is_template() else None

    def run(self, *index_vars):
        self.decide_inplace_update()
        self.mark_run()
        self.codegen(index_vars)

    def mark_run(self):
        self.allocate()

    def ranges_from_index_vars(self, index_vars):
        """Map each flattened index variable to its corresponding size."""
        flatten = itertools.chain.from_iterable
        assert sum(len(s) for s in self._sizes) == sum(len(v) for v in index_vars)
        return dict(zip(flatten(index_vars), flatten(self._sizes)))

    def codegen(self, index_vars):
        var_ranges = self.ranges_from_index_vars(index_vars)
        try:
            with V.set_ops_handler(
                SimplifyIndexing(V.get_ops_handler(), var_ranges)
            ), V.kernel.set_current_node(self):
                self._body(*index_vars)
        except Exception:
            log.fatal("Error in codegen for %s", self.node)
            raise

    def pointwise_read_writes(self):
        """
        Get the memory dependencies in the non-reduction axis.
        """
        sizes, reduction_sizes = self._sizes

        def pointwise_body(index):
            # Pin all reduction indices to zero so only the pointwise
            # axes contribute dependencies.
            return self._body(index, [sympy.Integer(0) for _ in reduction_sizes])

        return dependencies.extract_read_writes(pointwise_body, sizes)

    def can_inplace(self, read_dep: dependencies.MemoryDep):
        if self.get_aliases() or self.is_template():
            return False
        if len(self.read_writes.writes) != 1:
            return False
        if not isinstance(read_dep, dependencies.MemoryDep):
            return False
        write_dep = next(iter(self.read_writes.writes))
        assert isinstance(write_dep, dependencies.MemoryDep), f"{type(write_dep)=}"
        # In-place is safe when the read and the single write touch the
        # same index expression over the same extent.
        return read_dep.index == write_dep.index and read_dep.size == write_dep.size

    @cache_on_self
    def _get_atomic_add_buffers(self) -> Set[str]:
        """Names of buffers this node stores to with mode 'atomic_add'."""
        atomic_buffers: Set[str] = set()
        if not isinstance(self._body, ir.LoopBody):
            return atomic_buffers
        for fx_node in self._body.get_nodes():
            if fx_node.op != "call_method" or fx_node.target != "store":
                continue
            is_atomic = fx_node.kwargs.get("mode") == "atomic_add" or (
                len(fx_node.args) == 5 and fx_node.args[4] == "atomic_add"
            )
            if not is_atomic:
                continue
            if "name" in fx_node.kwargs:
                atomic_buffers.add(fx_node.kwargs["name"])
            elif len(fx_node.args) >= 2:
                atomic_buffers.add(fx_node.args[1])
            else:
                atomic_buffers.add("")
        return atomic_buffers

    def has_atomic_add(self, check_buf):
        return check_buf in self._get_atomic_add_buffers()
|
| 828 |
+
|
| 829 |
+
|
| 830 |
+
class FusedSchedulerNode(BaseSchedulerNode):
    """
    This is a "fake" scheduler node that represents a group of scheduler nodes
    that are meant to be fused together. The way it does this is by maintaining
    its unmet dependencies as the union of its constituent nodes.
    """

    @classmethod
    def fuse(cls, node1: BaseSchedulerNode, node2: BaseSchedulerNode):
        """Alternate constructor: fuse two (possibly already fused) nodes."""
        assert node1.scheduler is node2.scheduler
        assert isinstance(node1, (SchedulerNode, FusedSchedulerNode)) and isinstance(
            node2, (SchedulerNode, FusedSchedulerNode)
        )
        return cls(node1.scheduler, list(node1.get_nodes()) + list(node2.get_nodes()))  # type: ignore[arg-type]

    def __init__(self, scheduler: "Scheduler", snodes: List[SchedulerNode]):
        # Deliberately no super().__init__(): this node aggregates its
        # constituents rather than wrapping a single ir.Buffer.
        self.snodes = snodes
        self.scheduler = scheduler
        self.node: ir.Buffer = None  # type: ignore[assignment]
        self.users: List[NodeUser] = []
        self.inverse_users = []
        self.node_users = []
        # Prefer a reduction member's group as the group of the fusion.
        self.group = max(snodes, key=lambda s: int(s.is_reduction())).group
        self.ancestors = set.union(
            *[s.ancestors for s in snodes if s.ancestors is not None]
        )

        self.set_read_writes(
            dependencies.ReadWrites.merge_list([s.read_writes for s in snodes])
        )

        # Dependencies satisfied inside the fusion (on our own outputs or
        # writes) are no longer "unmet".
        self.unmet_dependencies = {
            dep
            for dep in set.union(*[s.unmet_dependencies for s in snodes])
            if dep.name not in self.get_names()
        } - self.read_writes.writes
        self.min_order = min(s.min_order for s in self.snodes)
        self.max_order = max(s.max_order for s in self.snodes)

    @cache_on_self
    def get_name(self) -> str:
        return "_".join(s.get_name() for s in self.snodes)

    def get_first_name(self) -> str:
        return self.snodes[0].get_name()

    @cache_on_self
    def get_names(self) -> Set[str]:
        return set.union(*[s.get_names() for s in self.snodes])

    def debug_str_extra(self) -> str:
        parts = [
            f"{self.get_name()}.snodes[{i}] =\n{node.debug_str()}"
            for i, node in enumerate(self.snodes)
        ]
        return textwrap.indent("\n".join(parts).rstrip(), " ")

    def set_last_usage(
        self, future_used_buffers: Set[str], mutation_real_name: Dict[str, str]
    ):
        # Global pass (inter-kernel optimisations) first...
        super().set_last_usage(future_used_buffers, mutation_real_name)
        # ...then a backwards sweep over the constituents so each snode's
        # last_usage reflects intra-kernel liveness.
        used_later: Set[str] = set()
        for snode in reversed(self.snodes):
            snode.set_last_usage(used_later, mutation_real_name)
            used_later.update(snode.last_usage)  # type: ignore[arg-type]

    @cache_on_self
    def used_buffer_names(self) -> Set[str]:
        return set.union(*[s.used_buffer_names() for s in self.snodes])

    @cache_on_self
    def used_or_aliased_buffer_names(self) -> Set[str]:
        return set.union(*[s.used_or_aliased_buffer_names() for s in self.snodes])

    def get_nodes(self) -> List[SchedulerNode]:
        return self.snodes

    def __repr__(self):
        return f"{type(self).__name__}(nodes={self.get_name()})"

    @cache_on_self
    def is_reduction(self):
        return any(s.is_reduction() for s in self.snodes)

    @cache_on_self
    def is_split_scan(self):
        return any(s.is_split_scan() for s in self.snodes)

    @cache_on_self
    def is_template(self):
        return any(s.is_template() for s in self.snodes)

    @cache_on_self
    def get_template_node(self):
        return next((s for s in self.snodes if s.is_template()), None)

    def get_device(self):
        return self.group[0]

    @cache_on_self
    def has_aliasing_or_mutation(self):
        return any(s.has_aliasing_or_mutation() for s in self.snodes)

    @cache_on_self
    def op_counts(self):
        totals: Counter[str] = collections.Counter()
        for snode in self.snodes:
            totals.update(snode.op_counts())
        return totals

    def has_atomic_add(self, check_buf):
        return any(
            isinstance(sub, SchedulerNode) and sub.has_atomic_add(check_buf)
            for sub in self.get_nodes()
        )

    # None of these need to be implemented, as a FusedSchedulerNode is just an
    # abstraction for scheduling purposes
    def update_mutated_names(self, renames: Dict[str, str]):
        raise NotImplementedError

    def add_mutation_dep(self, name):
        raise NotImplementedError

    def set_users(self, users: List["NodeUser"]):
        raise NotImplementedError

    def get_aliases(self):
        raise NotImplementedError

    def get_mutations(self):
        raise NotImplementedError

    def can_inplace(self, read_dep: dependencies.MemoryDep):
        raise NotImplementedError

    def allocate(self):
        raise NotImplementedError

    def can_free(self):
        raise NotImplementedError

    def debug_str(self) -> str:
        """Longer form printout for trace logs"""
        name = self.get_name()
        node_typestr = ",".join(type(n).__name__ for n in self.snodes)
        lines = [
            f"{name}: {type(self).__name__}({node_typestr})",
            f"{name}.writes = {pformat(self.read_writes.writes)}",
            f"{name}.unmet_dependencies = {pformat(self.unmet_dependencies)}",
            f"{name}.met_dependencies = {pformat(self.read_writes.reads - self.unmet_dependencies)}",
            f"{name}.users = {self.users}",
        ]
        try:
            lines.append(self.debug_str_extra())
        except Exception:
            log.warning("Ignoring error in debug_str()", exc_info=True)
        return "\n".join(lines).rstrip()
|
| 1002 |
+
|
| 1003 |
+
|
| 1004 |
+
class ForeachKernelSchedulerNode(FusedSchedulerNode):
    """Scheduler node which consists of a list of scheduler nodes that each operate on a
    distinct tensor in a list of tensors."""

    def get_consumer_subnode_for(self, producer):
        """Subnode (if any) that reads one of ``producer``'s outputs."""
        return self.read_to_node.get(producer.get_name())

    def get_producer_subnode_for(self, consumer):
        """Subnode (if any) that writes one of ``consumer``'s reads."""
        return next(
            (
                self.name_to_node[rd.name]
                for rd in consumer.read_writes.reads
                if rd.name in self.name_to_node
            ),
            None,
        )

    @classmethod
    def can_fuse(cls, producer, consumer):
        why = WhyNoFuse(producer, consumer)
        if producer.is_foreach() and consumer.is_foreach():
            foreach_match = len(producer.snodes) == len(consumer.snodes)
            if not foreach_match:
                why("foreach do not have same length")
            return foreach_match and all(
                producer.scheduler.can_fuse(l, r)
                for l, r in zip(producer.snodes, consumer.snodes)
            )
        if consumer.is_foreach():
            consumer_subnode = consumer.get_consumer_subnode_for(producer)
            if consumer_subnode is not None:
                return consumer.scheduler.can_fuse(producer, consumer_subnode)
            why("candidate producer is not dep of any foreach consumer")
            return False
        if producer.is_foreach():
            producer_subnode = producer.get_producer_subnode_for(consumer)
            if producer_subnode is not None:
                return producer.scheduler.can_fuse(producer_subnode, consumer)
            why("candidate consumer has no dep in any foreach producer")
            return False
        raise AssertionError(
            "At least one node passed to ForeachKernelSchedulerNode.can_fuse should be a foreach node"
        )

    @classmethod
    def fuse(cls, producer, consumer):
        assert producer.is_foreach() or consumer.is_foreach()
        prev_node_1 = None
        prev_node_2 = None
        if producer.is_foreach() and consumer.is_foreach():
            # Elementwise fusion of two equal-length foreach nodes.
            fused_nodes = [
                FusedSchedulerNode.fuse(l, r)
                for l, r in zip(producer.snodes, consumer.snodes)
            ]
        elif producer.is_foreach():
            # Fuse the single consumer into the matching producer subnode.
            target = producer.get_producer_subnode_for(consumer)
            prev_node_1 = producer
            fused_nodes = []
            for snode in producer.snodes:
                if snode is target:
                    merged = FusedSchedulerNode.fuse(snode, consumer)
                    prev_node_2 = merged
                    fused_nodes.append(merged)
                else:
                    fused_nodes.append(snode)
        elif consumer.is_foreach():
            # Fuse the single producer into the matching consumer subnode.
            target = consumer.get_consumer_subnode_for(producer)
            prev_node_1 = consumer
            fused_nodes = []
            for snode in consumer.snodes:
                if snode is target:
                    merged = FusedSchedulerNode.fuse(producer, snode)
                    prev_node_2 = merged
                    fused_nodes.append(merged)
                else:
                    fused_nodes.append(snode)
        return cls(producer.scheduler, fused_nodes, prev_node_1, prev_node_2)  # type: ignore[possibly-undefined]

    def __init__(
        self,
        scheduler: "Scheduler",
        nodes: List[SchedulerNode],
        prev_node_1=None,
        prev_node_2=None,
    ):
        self.read_to_node = {}
        self.name_to_node = {}

        if prev_node_1 is None or prev_node_2 is None:
            # Fresh foreach node: build everything via FusedSchedulerNode.
            super().__init__(scheduler, nodes)
            for snode in nodes:
                for read in snode.read_writes.reads:
                    self.read_to_node[read.name] = snode
                for name in snode.get_names():
                    self.name_to_node[name] = snode
        else:
            # Incremental path: an existing foreach node was just fused with
            # one extra node; reuse their precomputed state.
            self.scheduler = scheduler
            self.snodes = nodes
            self.node: ir.Buffer = None  # type: ignore[assignment]
            self.users: List[NodeUser] = []

            self.set_read_writes(
                dependencies.ReadWrites.merge_list(
                    [prev_node_1.read_writes, prev_node_2.read_writes]
                )
            )

            self.unmet_dependencies = {
                dep
                for dep in set.union(
                    prev_node_1.unmet_dependencies, prev_node_2.unmet_dependencies
                )
                if dep.name not in self.get_names()
            } - self.read_writes.writes

            self.min_order = min(prev_node_1.min_order, prev_node_2.min_order)
            self.max_order = max(prev_node_1.max_order, prev_node_2.max_order)

            if prev_node_1.is_foreach():
                foreach_node, other_node = prev_node_1, prev_node_2
            else:
                foreach_node, other_node = prev_node_2, prev_node_1

            self.ancestors = foreach_node.ancestors
            self.ancestors.update(other_node.ancestors)

            self.name_to_node = foreach_node.name_to_node
            for name in other_node.get_names():
                self.name_to_node[name] = other_node

        self.group = (nodes[0].get_device(), "foreach")

        self.origins: Set[torch.fx.Node] = set()

    def mark_run(self):
        raise NotImplementedError

    def codegen(self):
        assert isinstance(self.node, ir.ComputedBuffer), f"{type(self.node)=}"
        self.node.get_store_function()(self.node.make_loader()())

    def can_free(self):
        # NOTE(review): this returns the NotImplementedError class (always
        # truthy) rather than raising it; preserved as-is — confirm whether
        # raising was intended.
        return NotImplementedError

    def is_foreach(self):
        return True

    def get_subkernel_nodes(self):
        """Returns a list of nodes which comprise the foreach kernel, operating on corresponding elements of our input lists.
        These nodes may be vertically fused."""
        return list(self.snodes)

    def get_nodes(self):
        """Returns all nodes contained in this kernel, unpacking fused nodes into their constituent scheduler nodes."""
        return list(itertools.chain.from_iterable(x.get_nodes() for x in self.snodes))

    def get_first_name(self):
        return self.snodes[0].get_first_name()

    def prune_redundant_deps(self, name_to_fused_node):
        _prune_redundant_deps(self, name_to_fused_node)
        for snode in self.snodes:
            snode.prune_redundant_deps(name_to_fused_node)
|
| 1177 |
+
|
| 1178 |
+
|
| 1179 |
+
def pick_loop_order(stride_lengths, sizes, priority_idx=()):
    """
    A heuristic to decide loop iteration orders. This has not been well
    tuned and may be something we should autotune.
    """

    @functools.cmp_to_key
    def index_cmp(a, b):
        if sizes[a] == 1 or sizes[b] == 1:
            # Dimensions of extent 1 are irrelevant; push them to the end.
            return cmp(sizes[a] == 1, sizes[b] == 1)

        strides_a = [sl[a] for sl in stride_lengths]
        strides_b = [sl[b] for sl in stride_lengths]

        # Per-buffer vote for iterating dim `a` (resp. `b`) first; equivalent
        # to the vectorized
        # np.logical_or(stride_lengths[:, b] == 0, stride_lengths[:, a] < stride_lengths[:, b]).all()
        a_votes = sum(
            sb == 0 or sa < sb for sa, sb in zip(strides_a, strides_b)
        )
        b_votes = sum(
            sa == 0 or sb < sa for sa, sb in zip(strides_a, strides_b)
        )
        if a_votes > b_votes:
            return -1
        if b_votes > a_votes:
            return 1

        # Tie: fall back to contiguous (descending index) order.
        return cmp(b, a)

    order = list(reversed(range(len(stride_lengths[0]))))
    if len(priority_idx) > 0:
        # With priority nodes, rank dimensions by those nodes' strides only.
        # (Rebinding `stride_lengths` here is observed by the closure above.)
        stride_lengths = [stride_lengths[pi] for pi in priority_idx]
    if config.pick_loop_orders:
        order.sort(key=index_cmp)
    return order
|
| 1217 |
+
|
| 1218 |
+
|
| 1219 |
+
@dataclasses.dataclass
class NodeUser:
    """Records that ``node`` uses some buffer, along with how it may use it.

    Equality and hashing are keyed on the user's name plus the usage flags,
    so two NodeUsers for the same node with the same flags compare equal.
    """

    node: "BaseSchedulerNode"
    can_inplace: bool = False

    # A weak user must be scheduled after a given node, but doesn't actually
    # use the result
    is_weak: bool = False

    def __hash__(self):
        return hash((self.node.get_name(), self.can_inplace, self.is_weak))

    def __eq__(self, other):
        # Return NotImplemented for non-NodeUser operands (per the Python data
        # model) instead of raising AttributeError on other.get_name().
        if not isinstance(other, NodeUser):
            return NotImplemented
        return (
            self.get_name() == other.get_name()
            and self.can_inplace == other.can_inplace
            and self.is_weak == other.is_weak
        )

    def get_name(self):
        return self.node.get_name()

    def merge(self, other: "NodeUser") -> "NodeUser":
        """Combine two users of the same node; flags survive only if both hold."""
        assert self.node is other.node
        return NodeUser(
            self.node,
            self.can_inplace and other.can_inplace,
            self.is_weak and other.is_weak,
        )
|
| 1248 |
+
|
| 1249 |
+
|
| 1250 |
+
_post_grad_graph_counter = itertools.count()
|
| 1251 |
+
|
| 1252 |
+
|
| 1253 |
+
class Scheduler:
|
| 1254 |
+
@dynamo_timed
|
| 1255 |
+
def __init__(self, nodes):
|
| 1256 |
+
super().__init__()
|
| 1257 |
+
self.backends = {}
|
| 1258 |
+
self.fuse_cache = {}
|
| 1259 |
+
self.post_grad_graph_id = next(_post_grad_graph_counter)
|
| 1260 |
+
|
| 1261 |
+
self.nodes = []
|
| 1262 |
+
self.available_buffer_names = {
|
| 1263 |
+
*V.graph.graph_inputs.keys(),
|
| 1264 |
+
*V.graph.constants.keys(),
|
| 1265 |
+
}
|
| 1266 |
+
|
| 1267 |
+
self.nodes = [self.create_scheduler_node(n) for n in nodes]
|
| 1268 |
+
|
| 1269 |
+
# some new constants could have been created above
|
| 1270 |
+
self.available_buffer_names.update(V.graph.constants.keys())
|
| 1271 |
+
for node in self.nodes:
|
| 1272 |
+
node.prune_deps()
|
| 1273 |
+
|
| 1274 |
+
self.name_to_node: Dict[str, BaseSchedulerNode] = {
|
| 1275 |
+
n.get_name(): n for n in self.nodes
|
| 1276 |
+
}
|
| 1277 |
+
self.name_to_fused_node: Dict[
|
| 1278 |
+
str, BaseSchedulerNode
|
| 1279 |
+
] = dict() # set in fuse_nodes()
|
| 1280 |
+
|
| 1281 |
+
# mutation_real_name: Maps back to the original name for codegen
|
| 1282 |
+
# Example:
|
| 1283 |
+
# If you mutate buf0 inside of buf1's kernel, then:
|
| 1284 |
+
# mutation_real_name = {"buf0" : "buf1"}
|
| 1285 |
+
# all subsequent uses of buf0 become buf1's usage in dependency graph
|
| 1286 |
+
self.mutation_real_name = {}
|
| 1287 |
+
|
| 1288 |
+
# We handle mutation by renaming modified versions of the same
|
| 1289 |
+
# buffer in the dependency graph to prevent cycles.
|
| 1290 |
+
# mutation_renames: tracks the current name for a given buffer
|
| 1291 |
+
# (changed once per mutation)
|
| 1292 |
+
# Example:
|
| 1293 |
+
# If you mutate buf0 inside of buf1's kernel, then:
|
| 1294 |
+
# mutation_renames = {"buf1" : "buf0"}
|
| 1295 |
+
# in codegen we only use buf0, never buf1
|
| 1296 |
+
self.mutation_renames = {}
|
| 1297 |
+
|
| 1298 |
+
self.compute_dependencies()
|
| 1299 |
+
self.topological_sort_schedule()
|
| 1300 |
+
self.dead_node_elimination()
|
| 1301 |
+
if config.reorder_for_compute_comm_overlap:
|
| 1302 |
+
comms.decide_global_ordering_of_comms(self.nodes)
|
| 1303 |
+
self.compute_ancestors()
|
| 1304 |
+
|
| 1305 |
+
metrics.ir_nodes_pre_fusion += len(self.nodes)
|
| 1306 |
+
V.debug.ir_pre_fusion(self.nodes)
|
| 1307 |
+
self.num_orig_nodes = len(self.nodes)
|
| 1308 |
+
self.name_to_fused_node = {n.get_name(): n for n in self.nodes}
|
| 1309 |
+
self.create_foreach_nodes()
|
| 1310 |
+
self.topological_sort_schedule()
|
| 1311 |
+
self.logged_slow_fusion = set()
|
| 1312 |
+
self.fuse_nodes()
|
| 1313 |
+
if config.reorder_for_compute_comm_overlap:
|
| 1314 |
+
# Refresh node_users and inverse_users to reflect fused nodes
|
| 1315 |
+
self.compute_node_users()
|
| 1316 |
+
self.nodes = comms.reorder_compute_and_comm_for_overlap(self.nodes)
|
| 1317 |
+
self.compute_last_usage()
|
| 1318 |
+
V.debug.ir_post_fusion(self.nodes)
|
| 1319 |
+
V.debug.graph_diagram(self.nodes)
|
| 1320 |
+
self.debug_draw_graph()
|
| 1321 |
+
|
| 1322 |
+
# used during codegen:
|
| 1323 |
+
self.current_device: torch.device = None # type: ignore[assignment]
|
| 1324 |
+
self.buffer_names_to_free = set()
|
| 1325 |
+
|
| 1326 |
+
# fx graph node to the position it appears in the graph
|
| 1327 |
+
# for debug attribution
|
| 1328 |
+
self.origin_to_index = {}
|
| 1329 |
+
|
| 1330 |
+
get_metric_table("graph_stats").add_row(
|
| 1331 |
+
lambda: {
|
| 1332 |
+
"graph_id": self.post_grad_graph_id,
|
| 1333 |
+
"num_nodes_before_fusion": self.num_orig_nodes,
|
| 1334 |
+
"num_nodes_after_fusion": len(self.nodes),
|
| 1335 |
+
}
|
| 1336 |
+
)
|
| 1337 |
+
|
| 1338 |
+
def debug_draw_graph(self):
|
| 1339 |
+
"""Generate an image of the graph for debugging"""
|
| 1340 |
+
if os.environ.get("INDUCTOR_WRITE_SCHEDULER_GRAPH", None) == "1":
|
| 1341 |
+
from .debug import draw_buffers
|
| 1342 |
+
|
| 1343 |
+
draw_buffers(self.nodes, print_graph=True)
|
| 1344 |
+
|
| 1345 |
+
def debug_print_nodes(self, label):
|
| 1346 |
+
if log.isEnabledFor(logging.INFO):
|
| 1347 |
+
log.info("%s:", label)
|
| 1348 |
+
for node in self.nodes:
|
| 1349 |
+
node.log_details()
|
| 1350 |
+
|
| 1351 |
+
def create_scheduler_node(self, node):
|
| 1352 |
+
assert (
|
| 1353 |
+
node.origins is not None
|
| 1354 |
+
), "All nodes passed to scheduling must have an origin"
|
| 1355 |
+
if node.is_no_op():
|
| 1356 |
+
return NopKernelSchedulerNode(self, node)
|
| 1357 |
+
elif isinstance(node, (ir.ComputedBuffer, ir.TemplateBuffer)):
|
| 1358 |
+
return SchedulerNode(self, node)
|
| 1359 |
+
elif isinstance(node, ir.ExternKernel):
|
| 1360 |
+
return ExternKernelSchedulerNode(self, node)
|
| 1361 |
+
else:
|
| 1362 |
+
raise NotImplementedError(node)
|
| 1363 |
+
|
| 1364 |
+
def create_foreach_nodes(self):
|
| 1365 |
+
removed_node_names = set()
|
| 1366 |
+
fe_nodes = []
|
| 1367 |
+
kept_node_names = self.name_to_fused_node.keys()
|
| 1368 |
+
|
| 1369 |
+
for names in V.graph.lists.values():
|
| 1370 |
+
names = [
|
| 1371 |
+
name
|
| 1372 |
+
for name in names
|
| 1373 |
+
if name in kept_node_names
|
| 1374 |
+
and not isinstance(self.name_to_node[name], NopKernelSchedulerNode)
|
| 1375 |
+
]
|
| 1376 |
+
if not names:
|
| 1377 |
+
# All nodes eliminated
|
| 1378 |
+
continue
|
| 1379 |
+
|
| 1380 |
+
removed_node_names.update(names)
|
| 1381 |
+
snodes = [self.name_to_node[name] for name in names]
|
| 1382 |
+
|
| 1383 |
+
fe_node = ForeachKernelSchedulerNode(self, snodes) # type: ignore[arg-type]
|
| 1384 |
+
|
| 1385 |
+
fe_nodes.append(fe_node)
|
| 1386 |
+
|
| 1387 |
+
for name in names:
|
| 1388 |
+
self.name_to_fused_node[name] = fe_node
|
| 1389 |
+
|
| 1390 |
+
self.nodes = [
|
| 1391 |
+
node for node in self.nodes if node.get_name() not in removed_node_names
|
| 1392 |
+
] + fe_nodes
|
| 1393 |
+
|
| 1394 |
+
def compute_dependencies(self):
|
| 1395 |
+
"""
|
| 1396 |
+
Create dependency edges between nodes, handling aliasing and
|
| 1397 |
+
mutation properly.
|
| 1398 |
+
"""
|
| 1399 |
+
|
| 1400 |
+
T = TypeVar("T")
|
| 1401 |
+
|
| 1402 |
+
class DedupList(Generic[T]):
|
| 1403 |
+
"""
|
| 1404 |
+
This data structure behaves like a list except it makes sure the
|
| 1405 |
+
elements remain unique.
|
| 1406 |
+
Normally one could use a set/dict for this purpose however
|
| 1407 |
+
the list in question gets elements appended as it is being
|
| 1408 |
+
iterated over which means that we need to keep the list
|
| 1409 |
+
semantics.
|
| 1410 |
+
"""
|
| 1411 |
+
|
| 1412 |
+
def __init__(self, items=None, membership=None):
|
| 1413 |
+
self.items = items or list()
|
| 1414 |
+
self.membership = membership or set()
|
| 1415 |
+
|
| 1416 |
+
def append(self, node_user: T) -> None:
|
| 1417 |
+
if node_user in self.membership:
|
| 1418 |
+
return
|
| 1419 |
+
self.items.append(node_user)
|
| 1420 |
+
self.membership.add(node_user)
|
| 1421 |
+
|
| 1422 |
+
def __add__(self, other: "DedupList[T]") -> "DedupList[T]":
|
| 1423 |
+
new_membership = set.union(self.membership, other.membership)
|
| 1424 |
+
new_items = self.items + [
|
| 1425 |
+
x for x in other.items if x not in self.membership
|
| 1426 |
+
]
|
| 1427 |
+
return DedupList(new_items, new_membership)
|
| 1428 |
+
|
| 1429 |
+
name_to_users: DefaultDict[str, DedupList[NodeUser]] = collections.defaultdict(
|
| 1430 |
+
DedupList
|
| 1431 |
+
)
|
| 1432 |
+
|
| 1433 |
+
# handle aliasing by using python aliasing in name_to_users
|
| 1434 |
+
# if foo aliases bar then we will make name_to_users["foo"] point
|
| 1435 |
+
# to the same python list as name_to_users["bar"]
|
| 1436 |
+
for node1 in self.nodes:
|
| 1437 |
+
node1_name = node1.get_name()
|
| 1438 |
+
for node2_name in node1.get_aliases():
|
| 1439 |
+
if node1_name in name_to_users and node2_name in name_to_users:
|
| 1440 |
+
# merge the two
|
| 1441 |
+
list1 = name_to_users[node1_name]
|
| 1442 |
+
list2 = name_to_users[node2_name]
|
| 1443 |
+
combined = list1 + list2
|
| 1444 |
+
for key in name_to_users.keys():
|
| 1445 |
+
if name_to_users[key] is list1 or name_to_users[key] is list2:
|
| 1446 |
+
name_to_users[key] = combined
|
| 1447 |
+
elif node1_name in name_to_users:
|
| 1448 |
+
name_to_users[node2_name] = name_to_users[node1_name]
|
| 1449 |
+
else:
|
| 1450 |
+
name_to_users[node1_name] = name_to_users[node2_name]
|
| 1451 |
+
|
| 1452 |
+
def rename(n):
|
| 1453 |
+
if n in self.mutation_renames:
|
| 1454 |
+
return rename(self.mutation_renames[n])
|
| 1455 |
+
return n
|
| 1456 |
+
|
| 1457 |
+
def dep_closure(node_name):
|
| 1458 |
+
reachable_names = {node_name}
|
| 1459 |
+
node = self.name_to_node[node_name]
|
| 1460 |
+
write_dep = next(iter(node.read_writes.writes))
|
| 1461 |
+
for read_dep in node.read_writes.reads:
|
| 1462 |
+
if (
|
| 1463 |
+
read_dep.name in self.name_to_node
|
| 1464 |
+
and isinstance(read_dep, dependencies.MemoryDep)
|
| 1465 |
+
and isinstance(write_dep, dependencies.MemoryDep)
|
| 1466 |
+
and read_dep.index == write_dep.index
|
| 1467 |
+
and read_dep.size == write_dep.size
|
| 1468 |
+
):
|
| 1469 |
+
reachable_names.update(dep_closure(read_dep.name))
|
| 1470 |
+
return reachable_names
|
| 1471 |
+
|
| 1472 |
+
def add_user(used_by_name, user_node, can_inplace=False, is_weak=False):
|
| 1473 |
+
name_to_users[rename(used_by_name)].append(
|
| 1474 |
+
NodeUser(user_node, can_inplace, is_weak)
|
| 1475 |
+
)
|
| 1476 |
+
|
| 1477 |
+
unbacked_symbol_to_origin_node = {}
|
| 1478 |
+
|
| 1479 |
+
for node in self.nodes:
|
| 1480 |
+
log.debug("scheduling %s", node.node)
|
| 1481 |
+
|
| 1482 |
+
# unbacked symbols don't follow ordinary buffer dependencies, so
|
| 1483 |
+
# we track their def/uses separately
|
| 1484 |
+
unbacked_symbol_defs = sorted(
|
| 1485 |
+
node.node.get_unbacked_symbol_defs(), key=lambda x: x.name
|
| 1486 |
+
)
|
| 1487 |
+
for s in unbacked_symbol_defs:
|
| 1488 |
+
assert isinstance(s, sympy.Symbol)
|
| 1489 |
+
# Pick the first definer as canonical. There may be multiple
|
| 1490 |
+
# because if a MultiOutputLayout buffer propagates an unbacked
|
| 1491 |
+
# symint to multiple outputs, they will all claim to def it.
|
| 1492 |
+
if s not in unbacked_symbol_to_origin_node:
|
| 1493 |
+
unbacked_symbol_to_origin_node[s] = node
|
| 1494 |
+
|
| 1495 |
+
unbacked_symbol_uses = sorted(
|
| 1496 |
+
node.node.get_unbacked_symbol_uses(), key=lambda x: x.name
|
| 1497 |
+
)
|
| 1498 |
+
# if a kernel takes unbacked symints, register dependencies
|
| 1499 |
+
for s in unbacked_symbol_uses:
|
| 1500 |
+
assert (
|
| 1501 |
+
s in unbacked_symbol_to_origin_node
|
| 1502 |
+
), f"{s} not in {unbacked_symbol_to_origin_node}"
|
| 1503 |
+
node.add_fake_dep(StarDep(unbacked_symbol_to_origin_node[s].get_name()))
|
| 1504 |
+
|
| 1505 |
+
# a node will mutate either 0 or 1 buffers
|
| 1506 |
+
assert len(node.get_mutations()) <= 1
|
| 1507 |
+
for alt_name in node.get_mutations():
|
| 1508 |
+
alt_name = rename(alt_name)
|
| 1509 |
+
# this node must run after the prior writer
|
| 1510 |
+
add_user(alt_name, node)
|
| 1511 |
+
node.add_mutation_dep(StarDep(alt_name))
|
| 1512 |
+
for other_node in name_to_users[alt_name].items:
|
| 1513 |
+
# this node must run after all prior readers
|
| 1514 |
+
other_name = rename(other_node.get_name())
|
| 1515 |
+
known_dep_node_names = dep_closure(node.get_name())
|
| 1516 |
+
if other_name not in known_dep_node_names:
|
| 1517 |
+
# If this node already directly or indirectly depends on other_node,
|
| 1518 |
+
# we don't need to insert an extra dep.
|
| 1519 |
+
node.add_mutation_dep(WeakDep(other_name))
|
| 1520 |
+
add_user(other_name, node, is_weak=True)
|
| 1521 |
+
|
| 1522 |
+
# add normal non-mutation dependencies
|
| 1523 |
+
for read in node.read_writes.reads:
|
| 1524 |
+
is_weak = isinstance(read, WeakDep)
|
| 1525 |
+
add_user(read.name, node, node.can_inplace(read), is_weak)
|
| 1526 |
+
|
| 1527 |
+
node.update_mutated_names(self.mutation_renames)
|
| 1528 |
+
|
| 1529 |
+
# update our renaming scheme for the next iteration
|
| 1530 |
+
for alt_name in node.get_mutations():
|
| 1531 |
+
self.mutation_renames[rename(alt_name)] = node.get_name()
|
| 1532 |
+
self.mutation_renames[alt_name] = node.get_name()
|
| 1533 |
+
self.mutation_real_name[node.get_name()] = self.mutation_real_name.get(
|
| 1534 |
+
alt_name, alt_name
|
| 1535 |
+
)
|
| 1536 |
+
|
| 1537 |
+
# make sure outputs aren't dead-code-eliminated
|
| 1538 |
+
for node_name in V.graph.get_output_names():
|
| 1539 |
+
log.debug("scheduling output %s", node_name)
|
| 1540 |
+
add_user(node_name, OutputNode(StarDep(node_name)))
|
| 1541 |
+
|
| 1542 |
+
# make sure unbacked symints aren't dead-code-eliminated
|
| 1543 |
+
for node in V.graph.graph_outputs:
|
| 1544 |
+
for s in node.get_unbacked_symbol_uses():
|
| 1545 |
+
assert (
|
| 1546 |
+
s in unbacked_symbol_to_origin_node
|
| 1547 |
+
), f"{s} not in {unbacked_symbol_to_origin_node.keys()}"
|
| 1548 |
+
node_name = unbacked_symbol_to_origin_node[s].node.name
|
| 1549 |
+
log.debug("scheduling output %s for unbacked symint %s", node_name, s)
|
| 1550 |
+
add_user(node_name, OutputNode(StarDep(node_name)))
|
| 1551 |
+
|
| 1552 |
+
# make sure input mutation isn't dead-code-eliminated
|
| 1553 |
+
for name in self.mutation_renames:
|
| 1554 |
+
if name in V.graph.graph_inputs:
|
| 1555 |
+
add_user(name, OutputNode(StarDep(name)))
|
| 1556 |
+
V.graph.mutated_inputs.add(name)
|
| 1557 |
+
|
| 1558 |
+
inp_names = {
|
| 1559 |
+
name: index for index, name in enumerate(V.graph.graph_inputs.keys())
|
| 1560 |
+
}
|
| 1561 |
+
V.graph.mutated_input_idxs = [
|
| 1562 |
+
inp_names[name] for name in V.graph.mutated_inputs
|
| 1563 |
+
]
|
| 1564 |
+
|
| 1565 |
+
# copy users information onto the nodes
|
| 1566 |
+
for node in self.nodes:
|
| 1567 |
+
node.set_users(name_to_users[node.get_name()].items)
|
| 1568 |
+
|
| 1569 |
+
# populate inverse_users
|
| 1570 |
+
for node in self.nodes:
|
| 1571 |
+
for user in node.users:
|
| 1572 |
+
user.node.inverse_users.append(node)
|
| 1573 |
+
|
| 1574 |
+
def compute_node_users(self):
|
| 1575 |
+
# set up buffer name to (fused)snode mapping
|
| 1576 |
+
buf_to_snode = {}
|
| 1577 |
+
for node in self.nodes:
|
| 1578 |
+
if isinstance(node, FusedSchedulerNode):
|
| 1579 |
+
for x in node.snodes:
|
| 1580 |
+
buf_to_snode[x.get_name()] = node
|
| 1581 |
+
buf_to_snode[node.get_name()] = node
|
| 1582 |
+
|
| 1583 |
+
for node in self.nodes:
|
| 1584 |
+
node.node_users = []
|
| 1585 |
+
node.inverse_users = []
|
| 1586 |
+
|
| 1587 |
+
# compute inverse_users
|
| 1588 |
+
for node in self.nodes:
|
| 1589 |
+
inverse_users = []
|
| 1590 |
+
for dep in node.unmet_dependencies:
|
| 1591 |
+
assert dep.name in buf_to_snode
|
| 1592 |
+
dep_node = buf_to_snode[dep.name]
|
| 1593 |
+
inverse_users.append(dep_node)
|
| 1594 |
+
node.inverse_users = inverse_users
|
| 1595 |
+
|
| 1596 |
+
# compute node_users
|
| 1597 |
+
# TODO: ideally, we should deduplicate .users and .node_users,
|
| 1598 |
+
# but currently .users contains extra information that's difficult to
|
| 1599 |
+
# extract into a standalone container.
|
| 1600 |
+
node_to_users: Dict[BaseSchedulerNode, List[BaseSchedulerNode]] = {}
|
| 1601 |
+
for node in self.nodes:
|
| 1602 |
+
for inverse_user in node.inverse_users:
|
| 1603 |
+
node_to_users.setdefault(inverse_user, []).append(node)
|
| 1604 |
+
for node, users in node_to_users.items():
|
| 1605 |
+
node.node_users = users
|
| 1606 |
+
|
| 1607 |
+
def dead_node_elimination(self):
|
| 1608 |
+
"""
|
| 1609 |
+
Remove any nodes without users
|
| 1610 |
+
"""
|
| 1611 |
+
again = True # repeat until a fixed point
|
| 1612 |
+
while again:
|
| 1613 |
+
updated_nodes = []
|
| 1614 |
+
for node in self.nodes:
|
| 1615 |
+
|
| 1616 |
+
def can_eliminate_user(user: NodeUser):
|
| 1617 |
+
return user.is_weak or user.get_name() in V.graph.removed_buffers
|
| 1618 |
+
|
| 1619 |
+
can_eliminate = not node.has_side_effects() and all(
|
| 1620 |
+
can_eliminate_user(u) for u in node.users
|
| 1621 |
+
)
|
| 1622 |
+
|
| 1623 |
+
if not can_eliminate:
|
| 1624 |
+
updated_nodes.append(node)
|
| 1625 |
+
else:
|
| 1626 |
+
# dead code
|
| 1627 |
+
log.debug("removed dead node: %s", node.get_name())
|
| 1628 |
+
V.graph.removed_buffers.add(node.get_name())
|
| 1629 |
+
|
| 1630 |
+
again = len(self.nodes) > len(updated_nodes)
|
| 1631 |
+
self.nodes = updated_nodes
|
| 1632 |
+
|
| 1633 |
+
# Prune any WeakDeps no longer needed
|
| 1634 |
+
for node in self.nodes:
|
| 1635 |
+
node.prune_weak_deps()
|
| 1636 |
+
|
| 1637 |
+
def topological_sort_schedule(self):
|
| 1638 |
+
"""
|
| 1639 |
+
Ensure self.nodes is in topologically sorted order
|
| 1640 |
+
"""
|
| 1641 |
+
seen: Set[ir.Buffer] = set()
|
| 1642 |
+
name_to_node: Dict[str, ir.Buffer] = dict()
|
| 1643 |
+
result: List[ir.Buffer] = []
|
| 1644 |
+
|
| 1645 |
+
def visit(n):
|
| 1646 |
+
if n not in seen:
|
| 1647 |
+
seen.add(n)
|
| 1648 |
+
for dep in sorted(n.unmet_dependencies, key=lambda d: d.name):
|
| 1649 |
+
visit(name_to_node[dep.name])
|
| 1650 |
+
result.append(n)
|
| 1651 |
+
|
| 1652 |
+
for node in self.nodes:
|
| 1653 |
+
for name in node.get_names():
|
| 1654 |
+
name_to_node[name] = node
|
| 1655 |
+
for node in self.nodes:
|
| 1656 |
+
visit(node)
|
| 1657 |
+
self.nodes = result
|
| 1658 |
+
|
| 1659 |
+
def compute_ancestors(self):
|
| 1660 |
+
"""
|
| 1661 |
+
Populate each node.ancestors
|
| 1662 |
+
"""
|
| 1663 |
+
# note self.nodes is topologically sorted
|
| 1664 |
+
name_to_ancestors: Dict[str, Set[str]] = {}
|
| 1665 |
+
for node in self.nodes:
|
| 1666 |
+
ancestors = set()
|
| 1667 |
+
for dep in node.unmet_dependencies:
|
| 1668 |
+
ancestors.add(dep.name)
|
| 1669 |
+
ancestors |= name_to_ancestors[dep.name]
|
| 1670 |
+
name_to_ancestors[node.get_name()] = ancestors
|
| 1671 |
+
node.ancestors = ancestors
|
| 1672 |
+
|
| 1673 |
+
for order, node in enumerate(self.nodes):
|
| 1674 |
+
node.min_order = order
|
| 1675 |
+
node.max_order = order
|
| 1676 |
+
|
| 1677 |
+
def fuse_nodes(self):
|
| 1678 |
+
"""
|
| 1679 |
+
Mutates self.nodes to combine nodes into FusedSchedulerNodes.
|
| 1680 |
+
"""
|
| 1681 |
+
for i in range(10):
|
| 1682 |
+
old_len = len(self.nodes)
|
| 1683 |
+
fusion_log.debug(
|
| 1684 |
+
"===== attempting fusion (%d/10): %d nodes =====", i + 1, old_len
|
| 1685 |
+
)
|
| 1686 |
+
self.fuse_nodes_once()
|
| 1687 |
+
new_len = len(self.nodes)
|
| 1688 |
+
fusion_log.debug(
|
| 1689 |
+
"completed fusion round (%d/10): fused %d nodes into %d nodes\n",
|
| 1690 |
+
i + 1,
|
| 1691 |
+
old_len,
|
| 1692 |
+
new_len,
|
| 1693 |
+
)
|
| 1694 |
+
if new_len == old_len or new_len == 1:
|
| 1695 |
+
fusion_log.debug("===== fusion complete (%d iterations) =====", i + 1)
|
| 1696 |
+
break
|
| 1697 |
+
|
| 1698 |
+
def benchmark_fused_nodes(self, nodes):
|
| 1699 |
+
"""
|
| 1700 |
+
Benchmark fused list of nodes and return the execution time
|
| 1701 |
+
in milliseconds on randomly generated inputs.
|
| 1702 |
+
"""
|
| 1703 |
+
assert len(nodes) > 0
|
| 1704 |
+
device = nodes[0].get_device()
|
| 1705 |
+
V.graph.scheduler = self
|
| 1706 |
+
self.current_device = device
|
| 1707 |
+
backend = self.get_backend(device)
|
| 1708 |
+
return backend.benchmark_fused_nodes(nodes)
|
| 1709 |
+
|
| 1710 |
+
def speedup_by_fusion(self, node1, node2):
|
| 1711 |
+
"""
|
| 1712 |
+
If config.benchmark_fusion is False, always return True.
|
| 1713 |
+
Otherwise, return True if fusion can brings speedup.
|
| 1714 |
+
"""
|
| 1715 |
+
if not config.benchmark_fusion:
|
| 1716 |
+
return True
|
| 1717 |
+
|
| 1718 |
+
if (
|
| 1719 |
+
node1.is_template()
|
| 1720 |
+
and not isinstance(node1.get_template_node(), ir.TritonTemplateBuffer)
|
| 1721 |
+
or node1.is_foreach()
|
| 1722 |
+
or node2.is_foreach()
|
| 1723 |
+
):
|
| 1724 |
+
# TODO support benchmarking epilogue fusion
|
| 1725 |
+
return True
|
| 1726 |
+
|
| 1727 |
+
node_list_1 = node1.get_nodes()
|
| 1728 |
+
device = node_list_1[0].get_device()
|
| 1729 |
+
|
| 1730 |
+
# don't support benchmark fusion for CPU right now.
|
| 1731 |
+
if device.type == "cpu":
|
| 1732 |
+
return True
|
| 1733 |
+
|
| 1734 |
+
node_list_2 = node2.get_nodes()
|
| 1735 |
+
node_list_fused = node_list_1 + node_list_2
|
| 1736 |
+
|
| 1737 |
+
# We can not accurately benchmark kernel using atomic_add
|
| 1738 |
+
# due to how we generate random integer inputs.
|
| 1739 |
+
# Skip benchmarking them by allowing fusion.
|
| 1740 |
+
if any(
|
| 1741 |
+
hasattr(n.node, "data")
|
| 1742 |
+
and hasattr(n.node.data, "scatter_mode")
|
| 1743 |
+
and n.node.data.scatter_mode == "atomic_add"
|
| 1744 |
+
for n in node_list_fused
|
| 1745 |
+
):
|
| 1746 |
+
return True
|
| 1747 |
+
|
| 1748 |
+
from triton.compiler.errors import CompilationError
|
| 1749 |
+
|
| 1750 |
+
why = WhyNoFuse(node1, node2)
|
| 1751 |
+
|
| 1752 |
+
try:
|
| 1753 |
+
ms1, path1 = self.benchmark_fused_nodes(node_list_1)
|
| 1754 |
+
if math.isinf(ms1):
|
| 1755 |
+
why("register spilling of the first kernel")
|
| 1756 |
+
return False
|
| 1757 |
+
ms2, path2 = self.benchmark_fused_nodes(node_list_2)
|
| 1758 |
+
if math.isinf(ms2):
|
| 1759 |
+
why("register spilling of the second kernel")
|
| 1760 |
+
return False
|
| 1761 |
+
ms_fused, path_fused = self.benchmark_fused_nodes(node_list_fused)
|
| 1762 |
+
if math.isinf(ms_fused):
|
| 1763 |
+
why("register spilling of the fused kernel")
|
| 1764 |
+
return False
|
| 1765 |
+
except CompilationError as e:
|
| 1766 |
+
# workaround triton issue: https://github.com/openai/triton/issues/2151
|
| 1767 |
+
if "Loop-carried variable" in str(e):
|
| 1768 |
+
return True # allow fusion
|
| 1769 |
+
else:
|
| 1770 |
+
raise
|
| 1771 |
+
|
| 1772 |
+
if fusion_log.isEnabledFor(logging.DEBUG):
|
| 1773 |
+
if ms_fused < ms1 + ms2:
|
| 1774 |
+
fusion_log.debug(
|
| 1775 |
+
"can fuse (benchmark): fusing %s with %s cause %sx speedup",
|
| 1776 |
+
node1.get_names(),
|
| 1777 |
+
node2.get_names(),
|
| 1778 |
+
green_text(f"{(ms1 + ms2) / ms_fused:.3f}"),
|
| 1779 |
+
)
|
| 1780 |
+
else:
|
| 1781 |
+
fusion_log.debug(
|
| 1782 |
+
"cannot fuse (benchmark): fusing %s with %s cause %sx slowdown",
|
| 1783 |
+
node1.get_names(),
|
| 1784 |
+
node2.get_names(),
|
| 1785 |
+
red_text(f"{ms_fused / (ms1 + ms2):.3f}"),
|
| 1786 |
+
)
|
| 1787 |
+
|
| 1788 |
+
if (
|
| 1789 |
+
is_metric_table_enabled("slow_fusion")
|
| 1790 |
+
and ms_fused >= ms1 + ms2
|
| 1791 |
+
and (path1, path2) not in self.logged_slow_fusion
|
| 1792 |
+
):
|
| 1793 |
+
self.logged_slow_fusion.add((path1, path2))
|
| 1794 |
+
get_metric_table("slow_fusion").add_row(
|
| 1795 |
+
lambda: {
|
| 1796 |
+
"kernel1_path": path1,
|
| 1797 |
+
"kernel1_latency": ms1,
|
| 1798 |
+
"kernel2_path": path2,
|
| 1799 |
+
"kernel2_latency": ms2,
|
| 1800 |
+
"fused_kernel_path": path_fused,
|
| 1801 |
+
"fused_kernel_latency": ms_fused,
|
| 1802 |
+
"slow_down_ratio": ms_fused / (ms1 + ms2),
|
| 1803 |
+
}
|
| 1804 |
+
)
|
| 1805 |
+
return ms_fused < ms1 + ms2
|
| 1806 |
+
|
| 1807 |
+
def fuse_nodes_once(self):
|
| 1808 |
+
"""
|
| 1809 |
+
Mutates self.nodes to combine nodes into FusedSchedulerNodes.
|
| 1810 |
+
|
| 1811 |
+
This relies on two key functions to control the logic:
|
| 1812 |
+
- self.can_fuse(): checks if a fusion is legal
|
| 1813 |
+
- self.score_fusion(): assigns priority to a given fusion
|
| 1814 |
+
"""
|
| 1815 |
+
fused_nodes = set(self.nodes)
|
| 1816 |
+
for node1, node2 in self.get_possible_fusions():
|
| 1817 |
+
node1 = self.name_to_fused_node[node1.get_first_name()]
|
| 1818 |
+
node2 = self.name_to_fused_node[node2.get_first_name()]
|
| 1819 |
+
if self.can_fuse(node1, node2) and not self.will_fusion_create_cycle(
|
| 1820 |
+
node1, node2
|
| 1821 |
+
):
|
| 1822 |
+
if not self.speedup_by_fusion(node1, node2):
|
| 1823 |
+
continue
|
| 1824 |
+
fusion_log.debug(
|
| 1825 |
+
"fusing %s with %s", node1.get_name(), node2.get_name()
|
| 1826 |
+
)
|
| 1827 |
+
|
| 1828 |
+
# above can_fuse asserts that node2 has the same device
|
| 1829 |
+
device = node1.get_device()
|
| 1830 |
+
node3 = self.get_backend(device).fuse(node1, node2)
|
| 1831 |
+
fused_nodes.remove(node1)
|
| 1832 |
+
fused_nodes.remove(node2)
|
| 1833 |
+
fused_nodes.add(node3)
|
| 1834 |
+
self.name_to_fused_node.update(
|
| 1835 |
+
{n.get_name(): node3 for n in node3.get_nodes()}
|
| 1836 |
+
)
|
| 1837 |
+
self.nodes = sorted(fused_nodes, key=lambda x: x.min_order)
|
| 1838 |
+
self.topological_sort_schedule()
|
| 1839 |
+
self.prune_redundant_deps()
|
| 1840 |
+
|
| 1841 |
+
def prune_redundant_deps(self):
|
| 1842 |
+
for node in self.nodes:
|
| 1843 |
+
node.prune_redundant_deps(self.name_to_fused_node)
|
| 1844 |
+
|
| 1845 |
+
def get_possible_fusions(self):
|
| 1846 |
+
"""
|
| 1847 |
+
Helper to find all legal fusion opportunities, sorted by self.score_fusion()
|
| 1848 |
+
"""
|
| 1849 |
+
possible_fusions = []
|
| 1850 |
+
seen = set()
|
| 1851 |
+
|
| 1852 |
+
def check_all_pairs(nodes):
|
| 1853 |
+
for node1_index, node1 in enumerate(nodes):
|
| 1854 |
+
for node2 in nodes[node1_index + 1 :]:
|
| 1855 |
+
key = (node1, node2)
|
| 1856 |
+
if key in seen:
|
| 1857 |
+
continue
|
| 1858 |
+
seen.add(key)
|
| 1859 |
+
|
| 1860 |
+
if self.can_fuse(node1, node2):
|
| 1861 |
+
possible_fusions.append(key)
|
| 1862 |
+
elif (node2.is_template() or node2.is_foreach()) and self.can_fuse(
|
| 1863 |
+
node2, node1
|
| 1864 |
+
):
|
| 1865 |
+
# foreach fusions and epilogue fusions are order dependent
|
| 1866 |
+
possible_fusions.append((node2, node1))
|
| 1867 |
+
|
| 1868 |
+
buffer_names_grouping = collections.defaultdict(list)
|
| 1869 |
+
for node in self.nodes:
|
| 1870 |
+
for buf in node.used_buffer_names():
|
| 1871 |
+
buffer_names_grouping[buf].append(node)
|
| 1872 |
+
for node_grouping in buffer_names_grouping.values():
|
| 1873 |
+
check_all_pairs(node_grouping)
|
| 1874 |
+
|
| 1875 |
+
if config.aggressive_fusion:
|
| 1876 |
+
group_grouping = collections.defaultdict(list)
|
| 1877 |
+
for node in self.nodes:
|
| 1878 |
+
group = getattr(node, "group", None)
|
| 1879 |
+
if group:
|
| 1880 |
+
group_grouping[group].append(node)
|
| 1881 |
+
for node_grouping in group_grouping.values():
|
| 1882 |
+
check_all_pairs(node_grouping)
|
| 1883 |
+
|
| 1884 |
+
possible_fusions.sort(key=self.score_fusion_key, reverse=True)
|
| 1885 |
+
fusion_log.debug("found %d possible fusions", len(possible_fusions))
|
| 1886 |
+
return possible_fusions
|
| 1887 |
+
|
| 1888 |
+
def will_fusion_create_cycle(self, node1, node2):
|
| 1889 |
+
"""
|
| 1890 |
+
Finds whether there's a path from node1 to node2 (or vice-versa)
|
| 1891 |
+
caused indirectly by other fusions.
|
| 1892 |
+
"""
|
| 1893 |
+
|
| 1894 |
+
def found_path(node):
|
| 1895 |
+
# only fused nodes can introduce new ancestors.
|
| 1896 |
+
if isinstance(node, FusedSchedulerNode) and node not in visited:
|
| 1897 |
+
visited.add(node)
|
| 1898 |
+
if node.get_names().issubset(combined_ancestors):
|
| 1899 |
+
# All fusion outputs are in ancestors of node1 and node2, thus
|
| 1900 |
+
# cannot introduce new path:
|
| 1901 |
+
#
|
| 1902 |
+
# 1. if output is neither descendent of node1 or node2, the
|
| 1903 |
+
# output cannot introduce a path
|
| 1904 |
+
# 2. due to [can_fuse]: if WLOG output is descendent of node1, it cannot be
|
| 1905 |
+
# on path(node1->node2), hence it cannot be ancestor of node2
|
| 1906 |
+
# 3. due to [acyclic]: if WLOG output is descendent of node1, it cannot be
|
| 1907 |
+
# ancestor of node1
|
| 1908 |
+
return False
|
| 1909 |
+
else:
|
| 1910 |
+
# continue DFS of new ancestors introduced by the fusion
|
| 1911 |
+
return bool(combined_names & node.ancestors) or any(
|
| 1912 |
+
found_path(self.name_to_fused_node[n])
|
| 1913 |
+
for n in node.ancestors - combined_ancestors
|
| 1914 |
+
)
|
| 1915 |
+
return False
|
| 1916 |
+
|
| 1917 |
+
visited = set()
|
| 1918 |
+
combined_names = node1.get_names() | node2.get_names()
|
| 1919 |
+
combined_ancestors = (node1.ancestors | node2.ancestors) - combined_names
|
| 1920 |
+
cycle = any(found_path(self.name_to_fused_node[n]) for n in combined_ancestors)
|
| 1921 |
+
if cycle:
|
| 1922 |
+
WhyNoFuse(node1, node2)("will create cycle")
|
| 1923 |
+
return cycle
|
| 1924 |
+
|
| 1925 |
+
def can_fusion_increase_peak_memory(
|
| 1926 |
+
self, node1: BaseSchedulerNode, node2: BaseSchedulerNode
|
| 1927 |
+
):
|
| 1928 |
+
"""
|
| 1929 |
+
This function prevents fusion for nodes that can increase memory
|
| 1930 |
+
footprint. This problem is more common in horizontal fusion, where nodes
|
| 1931 |
+
that are far apart in the original order get fused, lengthening the live
|
| 1932 |
+
intervals of tensors. This is very evident in models with activation
|
| 1933 |
+
checkpointing, where the recomputed nodes from different checkpointed
|
| 1934 |
+
regions get fused and significantly increase the memory footprint.
|
| 1935 |
+
|
| 1936 |
+
The current attempt is a quick, possibly hacky, heuristic to prevent the
|
| 1937 |
+
fusion of nodes that are far away in the original order.
|
| 1938 |
+
|
| 1939 |
+
A better but difficult to implement heurisitic would be to use live
|
| 1940 |
+
intervals of the buffers, find region of peak pressure in the original
|
| 1941 |
+
program and prevent fusion that crosses that peak region. We might need
|
| 1942 |
+
special care or good approximation in this implementation, as fusion of
|
| 1943 |
+
node changes live intervals, and re-computing live intervals and peak
|
| 1944 |
+
memory after each fusion can introduce large compilation overhead.
|
| 1945 |
+
"""
|
| 1946 |
+
proximity_score = max(
|
| 1947 |
+
abs(node1.min_order - node2.max_order),
|
| 1948 |
+
abs(node2.min_order - node1.max_order),
|
| 1949 |
+
)
|
| 1950 |
+
return proximity_score > 64
|
| 1951 |
+
|
| 1952 |
+
def can_fuse(self, node1: BaseSchedulerNode, node2: BaseSchedulerNode):
|
| 1953 |
+
"""
|
| 1954 |
+
Determine if it is possible to combine node1 and node2 into a
|
| 1955 |
+
single fused node.
|
| 1956 |
+
"""
|
| 1957 |
+
|
| 1958 |
+
if node1 is node2:
|
| 1959 |
+
return False
|
| 1960 |
+
|
| 1961 |
+
why = WhyNoFuse(node1, node2)
|
| 1962 |
+
|
| 1963 |
+
if (
|
| 1964 |
+
isinstance(node1, (ExternKernelSchedulerNode, NopKernelSchedulerNode))
|
| 1965 |
+
and not node1.is_template()
|
| 1966 |
+
):
|
| 1967 |
+
why("node1 is extern or nop")
|
| 1968 |
+
return False
|
| 1969 |
+
if (
|
| 1970 |
+
isinstance(node2, (ExternKernelSchedulerNode, NopKernelSchedulerNode))
|
| 1971 |
+
and not node2.is_template()
|
| 1972 |
+
):
|
| 1973 |
+
why("node2 is extern or nop")
|
| 1974 |
+
return False
|
| 1975 |
+
|
| 1976 |
+
if node2.get_names() & node1.ancestors:
|
| 1977 |
+
why("node1 must go before node2")
|
| 1978 |
+
return False
|
| 1979 |
+
|
| 1980 |
+
if (
|
| 1981 |
+
isinstance(node1, (FusedSchedulerNode, SchedulerNode))
|
| 1982 |
+
and isinstance(node2, SchedulerNode)
|
| 1983 |
+
and isinstance(node2._body, ir.LoopBody)
|
| 1984 |
+
):
|
| 1985 |
+
# Fix issue: https://github.com/pytorch/pytorch/issues/108963
|
| 1986 |
+
# Check:
|
| 1987 |
+
# If node2 reads a buf which is a mutation buf of node1(SchedulerNode) or among nodes in node1(FusedSchedulerNode),
|
| 1988 |
+
# we will get the corresponding mutation buf and check if this mutation buf is stored by atomic_add mode.
|
| 1989 |
+
# If True, we will disable the fusion of node1 and node2.
|
| 1990 |
+
if any(
|
| 1991 |
+
(
|
| 1992 |
+
node2_used_buf in self.mutation_renames
|
| 1993 |
+
and node1.has_atomic_add(self.mutation_renames[node2_used_buf])
|
| 1994 |
+
)
|
| 1995 |
+
for node2_used_buf in node2._body.reads_name2expr.keys()
|
| 1996 |
+
):
|
| 1997 |
+
return False
|
| 1998 |
+
|
| 1999 |
+
if node2.is_template():
|
| 2000 |
+
why("templates can only fuse epilogues")
|
| 2001 |
+
return False
|
| 2002 |
+
if node1.is_template() and (
|
| 2003 |
+
node2.has_aliasing_or_mutation()
|
| 2004 |
+
or node2.is_reduction()
|
| 2005 |
+
or not config.epilogue_fusion
|
| 2006 |
+
):
|
| 2007 |
+
why("template epilogue not satisfied")
|
| 2008 |
+
return False
|
| 2009 |
+
|
| 2010 |
+
device = node1.get_device()
|
| 2011 |
+
device2 = node2.get_device()
|
| 2012 |
+
if device != device2:
|
| 2013 |
+
why("device mismatch (%s vs %s)", device, device2)
|
| 2014 |
+
return False
|
| 2015 |
+
del device2
|
| 2016 |
+
|
| 2017 |
+
no_shared_data = self.score_fusion_memory(node1, node2) == 0
|
| 2018 |
+
if no_shared_data and (
|
| 2019 |
+
not config.aggressive_fusion or node1.is_reduction() or node2.is_reduction()
|
| 2020 |
+
):
|
| 2021 |
+
why("no shared data")
|
| 2022 |
+
return False # heuristic not needed for correctness
|
| 2023 |
+
|
| 2024 |
+
if (
|
| 2025 |
+
not node1.is_foreach()
|
| 2026 |
+
and not node2.is_foreach()
|
| 2027 |
+
and len(node1.get_nodes()) + len(node2.get_nodes()) > config.max_fusion_size
|
| 2028 |
+
):
|
| 2029 |
+
why("exceeds max fusion")
|
| 2030 |
+
return False # heuristic not needed for correctness
|
| 2031 |
+
|
| 2032 |
+
if node1.get_names() & node2.ancestors:
|
| 2033 |
+
# node2 depends on node1 outputs
|
| 2034 |
+
if not self.can_fuse_vertical(node1, node2):
|
| 2035 |
+
return False
|
| 2036 |
+
return self.get_backend(device).can_fuse_vertical(node1, node2)
|
| 2037 |
+
else: # nodes don't depend on each other, but may have common reads
|
| 2038 |
+
if self.can_fusion_increase_peak_memory(node1, node2):
|
| 2039 |
+
why("will increase peak memory")
|
| 2040 |
+
return False
|
| 2041 |
+
return self.get_backend(device).can_fuse_horizontal(node1, node2)
|
| 2042 |
+
|
| 2043 |
+
def can_fuse_vertical(self, node1, node2):
|
| 2044 |
+
"""
|
| 2045 |
+
Check if it is legal to fuse a consumer (node2) into a producer (node1).
|
| 2046 |
+
|
| 2047 |
+
We can fuse them if all the reads of node2 either match
|
| 2048 |
+
corresponding writes in node1, or are written by nodes that can
|
| 2049 |
+
be scheduled before the fusion of node1 and node2.
|
| 2050 |
+
|
| 2051 |
+
We also disable fusion of a write subsequent to a read if the reads
|
| 2052 |
+
and writes do not align.
|
| 2053 |
+
"""
|
| 2054 |
+
node1_names = node1.get_names()
|
| 2055 |
+
computed_deps = set()
|
| 2056 |
+
why = WhyNoFuse(node1, node2)
|
| 2057 |
+
|
| 2058 |
+
# StarDep doesn't match MemoryDep, different indices don't match
|
| 2059 |
+
# However, broadcasting sometimes strips dimensions, and if that's the case
|
| 2060 |
+
# we still can match unmet dep
|
| 2061 |
+
# if there's indirect indexing, don't match it
|
| 2062 |
+
def fusable_read_and_write(read: Dep, write: Dep):
|
| 2063 |
+
return (
|
| 2064 |
+
self.mutation_renames.get(read.name, read.name) == write.name
|
| 2065 |
+
and (isinstance(read, MemoryDep) and isinstance(write, MemoryDep))
|
| 2066 |
+
and not free_symbol_has(read.index, "tmp")
|
| 2067 |
+
and not free_symbol_has(write.index, "tmp")
|
| 2068 |
+
and read.index == write.index
|
| 2069 |
+
and len(read.size) >= len(write.size)
|
| 2070 |
+
and read.size[: len(write.size)] == write.size
|
| 2071 |
+
)
|
| 2072 |
+
|
| 2073 |
+
for rd in node2.unmet_dependencies:
|
| 2074 |
+
for cd in node1.read_writes.writes:
|
| 2075 |
+
if fusable_read_and_write(rd, cd):
|
| 2076 |
+
computed_deps.add(rd)
|
| 2077 |
+
|
| 2078 |
+
remaining_deps = {dep.name for dep in node2.unmet_dependencies - computed_deps}
|
| 2079 |
+
if remaining_deps & node1_names:
|
| 2080 |
+
# MemoryDeps didn't match and read different locations of the same buffer.
|
| 2081 |
+
# Examples here include:
|
| 2082 |
+
# - MemoryDep("foo", x) != MemoryDep("foo", x + 1)
|
| 2083 |
+
# - MemoryDep("foo", x) != StarDep("foo")
|
| 2084 |
+
why("memory deps did not match")
|
| 2085 |
+
return False
|
| 2086 |
+
for name in remaining_deps:
|
| 2087 |
+
if node1_names & self.name_to_fused_node[name].ancestors:
|
| 2088 |
+
why("intermediate nodes between node1 & node2")
|
| 2089 |
+
return False
|
| 2090 |
+
|
| 2091 |
+
# similar to can_inplace, if we are going to fuse a write subsequent to a read
|
| 2092 |
+
# require that the indexing and size is the same
|
| 2093 |
+
for write in node2.read_writes.writes:
|
| 2094 |
+
for read in node1.read_writes.reads:
|
| 2095 |
+
if write.name != self.mutation_renames.get(read.name, read.name):
|
| 2096 |
+
continue
|
| 2097 |
+
|
| 2098 |
+
# bail on StarDep
|
| 2099 |
+
if not fusable_read_and_write(read=read, write=write):
|
| 2100 |
+
why("fusing a write into a read with different indexing formula")
|
| 2101 |
+
return False
|
| 2102 |
+
|
| 2103 |
+
return True
|
| 2104 |
+
|
| 2105 |
+
def score_fusion(self, node1: BaseSchedulerNode, node2: BaseSchedulerNode):
|
| 2106 |
+
"""
|
| 2107 |
+
Assign a score (higher comes first) to the fusion of node1
|
| 2108 |
+
and node2. When different fusions conflict with each other,
|
| 2109 |
+
this is the way we decide what order to run them in.
|
| 2110 |
+
|
| 2111 |
+
Our current score is based on:
|
| 2112 |
+
- Estimate of the saved memory operations
|
| 2113 |
+
- Fusions closer together in original order
|
| 2114 |
+
"""
|
| 2115 |
+
memory_score = self.score_fusion_memory(node1, node2)
|
| 2116 |
+
proximity_score = -max(
|
| 2117 |
+
abs(node1.min_order - node2.max_order),
|
| 2118 |
+
abs(node2.min_order - node1.max_order),
|
| 2119 |
+
)
|
| 2120 |
+
return (
|
| 2121 |
+
node1.is_template() == config.epilogue_fusion_first and memory_score > 0,
|
| 2122 |
+
node1.is_reduction() == node2.is_reduction() and memory_score > 0,
|
| 2123 |
+
memory_score,
|
| 2124 |
+
proximity_score,
|
| 2125 |
+
)
|
| 2126 |
+
|
| 2127 |
+
def score_fusion_memory(self, node1, node2):
|
| 2128 |
+
"""
|
| 2129 |
+
The first term in our fusion score that estimates number of saved memory operations.
|
| 2130 |
+
"""
|
| 2131 |
+
common_memory_deps = (node1.read_writes.reads | node1.read_writes.writes) & (
|
| 2132 |
+
node2.read_writes.reads | node2.read_writes.writes
|
| 2133 |
+
)
|
| 2134 |
+
common_memory_deps = {
|
| 2135 |
+
dep for dep in common_memory_deps if not dep.has_unbacked_symbols()
|
| 2136 |
+
}
|
| 2137 |
+
return sum(dep.numbytes_hint() for dep in common_memory_deps)
|
| 2138 |
+
|
| 2139 |
+
def score_fusion_key(self, nodes):
|
| 2140 |
+
"""
|
| 2141 |
+
Shim for list.sort(key=...)
|
| 2142 |
+
"""
|
| 2143 |
+
node1, node2 = nodes
|
| 2144 |
+
return self.score_fusion(node1, node2)
|
| 2145 |
+
|
| 2146 |
+
def compute_last_usage(self):
|
| 2147 |
+
"""
|
| 2148 |
+
Populate node.last_usage recursively (also for the nodes within a FusedSchedulerNode)
|
| 2149 |
+
"""
|
| 2150 |
+
|
| 2151 |
+
future_used_buffers = set()
|
| 2152 |
+
for node_name in V.graph.get_output_names():
|
| 2153 |
+
future_used_buffers.add(node_name)
|
| 2154 |
+
|
| 2155 |
+
for node in reversed(self.nodes):
|
| 2156 |
+
node.set_last_usage(future_used_buffers, self.mutation_real_name)
|
| 2157 |
+
future_used_buffers.update(node.last_usage)
|
| 2158 |
+
|
| 2159 |
+
def free_buffers(self):
|
| 2160 |
+
"""Free any buffers that are no longer needed"""
|
| 2161 |
+
for name in sorted(
|
| 2162 |
+
self.buffer_names_to_free
|
| 2163 |
+
- V.graph.removed_buffers
|
| 2164 |
+
- V.graph.wrapper_code.freed
|
| 2165 |
+
):
|
| 2166 |
+
if name in self.name_to_node:
|
| 2167 |
+
node = self.name_to_node[name]
|
| 2168 |
+
if node.can_free():
|
| 2169 |
+
V.graph.wrapper_code.codegen_free(node.node)
|
| 2170 |
+
elif name in V.graph.graph_inputs:
|
| 2171 |
+
storage = V.graph.graph_inputs[name].data
|
| 2172 |
+
assert isinstance(storage, ir.StorageBox) and storage.is_input_buffer()
|
| 2173 |
+
V.graph.wrapper_code.codegen_free(storage.data)
|
| 2174 |
+
|
| 2175 |
+
self.buffer_names_to_free.clear()
|
| 2176 |
+
|
| 2177 |
+
def remove_kernel_local_buffers(self):
|
| 2178 |
+
"""
|
| 2179 |
+
Any buffers that are both created and have a last use in the
|
| 2180 |
+
same kernel can be removed.
|
| 2181 |
+
"""
|
| 2182 |
+
|
| 2183 |
+
# V.kernel.store_buffer_names should represent the set of nodes
|
| 2184 |
+
# get fused
|
| 2185 |
+
fused_node_names = V.kernel.store_buffer_names
|
| 2186 |
+
names_to_remove = []
|
| 2187 |
+
for out_buf in V.kernel.store_buffer_names:
|
| 2188 |
+
users = self.name_to_node[out_buf].users
|
| 2189 |
+
assert users is not None
|
| 2190 |
+
users = {user.get_name() for user in users if not user.is_weak}
|
| 2191 |
+
if users.issubset(fused_node_names):
|
| 2192 |
+
names_to_remove.append(out_buf)
|
| 2193 |
+
|
| 2194 |
+
def remove_filter(n):
|
| 2195 |
+
return (
|
| 2196 |
+
n not in V.kernel.must_keep_buffers
|
| 2197 |
+
and n not in V.kernel.args.input_buffers
|
| 2198 |
+
and n not in self.mutation_renames
|
| 2199 |
+
and n not in self.mutation_real_name
|
| 2200 |
+
)
|
| 2201 |
+
|
| 2202 |
+
names_to_remove = list(filter(remove_filter, names_to_remove))
|
| 2203 |
+
|
| 2204 |
+
for name in names_to_remove:
|
| 2205 |
+
if name in V.kernel.args.inplace_buffers:
|
| 2206 |
+
buf = V.kernel.args.inplace_buffers[name]
|
| 2207 |
+
if isinstance(buf, str) and buf.startswith("REMOVED"):
|
| 2208 |
+
continue
|
| 2209 |
+
remove = all(n in names_to_remove for n in buf.other_names)
|
| 2210 |
+
if remove:
|
| 2211 |
+
self.remove_inplace_buffer(name)
|
| 2212 |
+
V.kernel.inplaced_to_remove.add(name)
|
| 2213 |
+
else:
|
| 2214 |
+
self.remove_buffer(name)
|
| 2215 |
+
|
| 2216 |
+
def remove_buffer(self, name):
|
| 2217 |
+
# Assign a special value instead of deleting the entry
|
| 2218 |
+
# because we still rely on output_buffers's length to
|
| 2219 |
+
# generate unique arg name.
|
| 2220 |
+
log.debug("remove_buffer(%r)", name)
|
| 2221 |
+
V.kernel.args.output_buffers[name] = "REMOVED"
|
| 2222 |
+
V.kernel.removed_buffers.add(name)
|
| 2223 |
+
|
| 2224 |
+
def remove_inplace_buffer(self, name):
|
| 2225 |
+
log.debug("removing_inplace_buffer(%r)", name)
|
| 2226 |
+
inner_name = V.kernel.args.inplace_buffers[name].inner_name
|
| 2227 |
+
V.kernel.args.inplace_buffers[name] = inner_name.replace(
|
| 2228 |
+
"in_out_ptr", "REMOVED"
|
| 2229 |
+
)
|
| 2230 |
+
V.kernel.removed_buffers.add(name)
|
| 2231 |
+
|
| 2232 |
+
def flush(self):
|
| 2233 |
+
for backend in self.backends.values():
|
| 2234 |
+
backend.flush()
|
| 2235 |
+
self.free_buffers()
|
| 2236 |
+
|
| 2237 |
+
def codegen_extern_call(self, scheduler_node: ExternKernelSchedulerNode):
|
| 2238 |
+
assert isinstance(scheduler_node, ExternKernelSchedulerNode)
|
| 2239 |
+
# 'decide_inplace_update' stores the inplace update decisions in
|
| 2240 |
+
# the current kernel from where 'allocate' retrieve those decisions.
|
| 2241 |
+
# We have to make sure there is a non-NULL kernel handler to store
|
| 2242 |
+
# those inplace update decisions.
|
| 2243 |
+
with V.set_kernel_handler(Kernel(increase_kernel_count=False)):
|
| 2244 |
+
scheduler_node.decide_inplace_update()
|
| 2245 |
+
scheduler_node.allocate()
|
| 2246 |
+
node = scheduler_node.node
|
| 2247 |
+
assert isinstance(node, ir.ExternKernel), f"{type(node)=}"
|
| 2248 |
+
node.codegen(V.graph.wrapper_code)
|
| 2249 |
+
self.free_buffers()
|
| 2250 |
+
|
| 2251 |
+
def create_backend(self, device: torch.device):
|
| 2252 |
+
assert (
|
| 2253 |
+
device.type != "cuda" or device.index is not None
|
| 2254 |
+
), f"{device} should have been normalized in lowering"
|
| 2255 |
+
V.graph.add_device_info(device)
|
| 2256 |
+
|
| 2257 |
+
device_scheduling = get_scheduling_for_device(device.type)
|
| 2258 |
+
if device_scheduling is None:
|
| 2259 |
+
raise RuntimeError(f"Unsupported device type: {device.type}")
|
| 2260 |
+
|
| 2261 |
+
if device.type == "cuda" and not has_triton():
|
| 2262 |
+
device_props = torch.cuda.get_device_properties(device)
|
| 2263 |
+
if device_props.major < 7:
|
| 2264 |
+
raise RuntimeError(
|
| 2265 |
+
f"Found {device_props.name} which is too old to be supported by the triton GPU compiler, which is used as the backend. Triton only supports devices of CUDA Capability >= 7.0, but your device is of CUDA capability {device_props.major}.{device_props.minor}" # noqa: B950
|
| 2266 |
+
)
|
| 2267 |
+
else:
|
| 2268 |
+
raise RuntimeError(
|
| 2269 |
+
"Cannot find a working triton installation. More information on installing Triton can be found at https://github.com/openai/triton" # noqa: B950
|
| 2270 |
+
)
|
| 2271 |
+
|
| 2272 |
+
return device_scheduling(self)
|
| 2273 |
+
|
| 2274 |
+
def get_backend(self, device: torch.device):
|
| 2275 |
+
if device not in self.backends:
|
| 2276 |
+
self.backends[device] = self.create_backend(device)
|
| 2277 |
+
return self.backends[device]
|
| 2278 |
+
|
| 2279 |
+
def enter_context(self, node):
|
| 2280 |
+
def get_order(n):
|
| 2281 |
+
if n not in self.origin_to_index:
|
| 2282 |
+
self.origin_to_index.update({n: i for i, n in enumerate(n.graph.nodes)})
|
| 2283 |
+
return self.origin_to_index[n]
|
| 2284 |
+
|
| 2285 |
+
# Use a dict to have ordering
|
| 2286 |
+
origins = {
|
| 2287 |
+
(get_order(e), e): None for n in node.get_nodes() for e in n.node.origins
|
| 2288 |
+
}
|
| 2289 |
+
origins = list(origins.keys())
|
| 2290 |
+
if origins:
|
| 2291 |
+
_, last = max(origins, key=operator.itemgetter(0))
|
| 2292 |
+
V.graph.wrapper_code.enter_context(last)
|
| 2293 |
+
|
| 2294 |
+
@dynamo_timed
|
| 2295 |
+
def codegen(self):
|
| 2296 |
+
for node in self.nodes:
|
| 2297 |
+
try:
|
| 2298 |
+
log.debug(
|
| 2299 |
+
"Generating code for node %s with estimated runtime %f",
|
| 2300 |
+
node.get_name(),
|
| 2301 |
+
node.get_estimated_runtime(),
|
| 2302 |
+
)
|
| 2303 |
+
except Exception as e:
|
| 2304 |
+
log.debug(
|
| 2305 |
+
"Generating code for node %s with estimated runtime 0.0",
|
| 2306 |
+
node.get_name(),
|
| 2307 |
+
)
|
| 2308 |
+
|
| 2309 |
+
self.enter_context(node)
|
| 2310 |
+
|
| 2311 |
+
if not isinstance(node, NopKernelSchedulerNode):
|
| 2312 |
+
device = node.get_device()
|
| 2313 |
+
if (
|
| 2314 |
+
device != self.current_device
|
| 2315 |
+
or node.is_extern()
|
| 2316 |
+
or node.is_template()
|
| 2317 |
+
):
|
| 2318 |
+
self.flush()
|
| 2319 |
+
if device != self.current_device:
|
| 2320 |
+
if device.type == "cuda":
|
| 2321 |
+
if self.current_device and self.current_device.type == "cuda":
|
| 2322 |
+
V.graph.wrapper_code.codegen_device_guard_exit()
|
| 2323 |
+
assert device.index is not None, "device should have an index"
|
| 2324 |
+
V.graph.wrapper_code.codegen_device_guard_enter(device.index)
|
| 2325 |
+
elif self.current_device and self.current_device.type == "cuda":
|
| 2326 |
+
V.graph.wrapper_code.codegen_device_guard_exit()
|
| 2327 |
+
self.current_device = device
|
| 2328 |
+
|
| 2329 |
+
self.buffer_names_to_free.update(node.last_usage)
|
| 2330 |
+
|
| 2331 |
+
if node.is_template():
|
| 2332 |
+
node, *epilogue = node.get_nodes()
|
| 2333 |
+
self.get_backend(device).codegen_template(node, epilogue) # type: ignore[possibly-undefined]
|
| 2334 |
+
elif node.is_extern():
|
| 2335 |
+
self.codegen_extern_call(node)
|
| 2336 |
+
elif node.is_foreach():
|
| 2337 |
+
self.get_backend(device).codegen_foreach(node) # type: ignore[possibly-undefined]
|
| 2338 |
+
elif isinstance(node, (FusedSchedulerNode, SchedulerNode)):
|
| 2339 |
+
self.get_backend(device).codegen_nodes(node.get_nodes()) # type: ignore[possibly-undefined]
|
| 2340 |
+
else:
|
| 2341 |
+
assert isinstance(node, NopKernelSchedulerNode)
|
| 2342 |
+
node.allocate()
|
| 2343 |
+
|
| 2344 |
+
if config.debug_check_inf_and_nan:
|
| 2345 |
+
V.graph.wrapper_code.generate_inf_and_nan_checker(node)
|
| 2346 |
+
|
| 2347 |
+
if config.triton.debug_sync_kernel:
|
| 2348 |
+
self.get_backend(device).codegen_sync() # type: ignore[possibly-undefined]
|
| 2349 |
+
|
| 2350 |
+
self.available_buffer_names.update(node.get_names())
|
| 2351 |
+
|
| 2352 |
+
if not isinstance(node, NopKernelSchedulerNode):
|
| 2353 |
+
device = node.get_device()
|
| 2354 |
+
if self.get_backend(device).ready_to_flush():
|
| 2355 |
+
self.flush()
|
| 2356 |
+
|
| 2357 |
+
if self.current_device and self.current_device.type == "cuda":
|
| 2358 |
+
# exit the outermost CUDA device guard. this is
|
| 2359 |
+
# important for nested indentation codegen-ing.
|
| 2360 |
+
V.graph.wrapper_code.codegen_device_guard_exit()
|
| 2361 |
+
|
| 2362 |
+
self.flush()
|
| 2363 |
+
|
| 2364 |
+
def is_unaligned_buffer(self, buf_name):
|
| 2365 |
+
if buf_name in V.graph.graph_inputs or buf_name in V.graph.constants:
|
| 2366 |
+
# all graph inputs or constants are assumed to be aligned
|
| 2367 |
+
return False
|
| 2368 |
+
node = self.name_to_node[buf_name]
|
| 2369 |
+
layout = node.node.get_layout()
|
| 2370 |
+
if isinstance(layout, ir.AliasedLayout):
|
| 2371 |
+
return not layout.maybe_guard_aligned()
|
| 2372 |
+
else:
|
| 2373 |
+
return False
|
| 2374 |
+
|
| 2375 |
+
|
| 2376 |
+
class BaseScheduling:
|
| 2377 |
+
def can_fuse_vertical(self, node1: BaseSchedulerNode, node2: BaseSchedulerNode):
|
| 2378 |
+
"""
|
| 2379 |
+
Check whether node1 and node2 can be vertically fused or not.
|
| 2380 |
+
"""
|
| 2381 |
+
raise NotImplementedError()
|
| 2382 |
+
|
| 2383 |
+
def can_fuse_horizontal(self, node1: BaseSchedulerNode, node2: BaseSchedulerNode):
|
| 2384 |
+
"""
|
| 2385 |
+
Check whether node1 and node2 can be horizontally fused or not.
|
| 2386 |
+
"""
|
| 2387 |
+
raise NotImplementedError()
|
| 2388 |
+
|
| 2389 |
+
def fuse(self, node1: BaseSchedulerNode, node2: BaseSchedulerNode):
|
| 2390 |
+
"""
|
| 2391 |
+
Fuse two nodes
|
| 2392 |
+
"""
|
| 2393 |
+
if node1.is_foreach() or node2.is_foreach():
|
| 2394 |
+
return ForeachKernelSchedulerNode.fuse(node1, node2)
|
| 2395 |
+
else:
|
| 2396 |
+
return FusedSchedulerNode.fuse(node1, node2)
|
| 2397 |
+
|
| 2398 |
+
def group_fn(self, sizes):
|
| 2399 |
+
"""
|
| 2400 |
+
Process the iteration sizes in case a transformation needs to be applied.
|
| 2401 |
+
"""
|
| 2402 |
+
raise NotImplementedError()
|
| 2403 |
+
|
| 2404 |
+
def codegen_template(
|
| 2405 |
+
self, template_node: SchedulerNode, epilogue_nodes: List[SchedulerNode]
|
| 2406 |
+
):
|
| 2407 |
+
"""
|
| 2408 |
+
Given a template node, generate a kernel.
|
| 2409 |
+
|
| 2410 |
+
This function is only available for triton now. If the third-party backend behaves as a sub-class
|
| 2411 |
+
of TritonScheduling, it can override it or reuse it.
|
| 2412 |
+
"""
|
| 2413 |
+
raise NotImplementedError()
|
| 2414 |
+
|
| 2415 |
+
def codegen_nodes(self, nodes: List[SchedulerNode]):
|
| 2416 |
+
"""
|
| 2417 |
+
Generate a kernel given a list of pre-fused nodes.
|
| 2418 |
+
"""
|
| 2419 |
+
raise NotImplementedError()
|
| 2420 |
+
|
| 2421 |
+
def codegen_sync(self):
|
| 2422 |
+
"""
|
| 2423 |
+
Generate synchronization code for the kernel. This method depends on the hardware characteristics.
|
| 2424 |
+
"""
|
| 2425 |
+
raise NotImplementedError()
|
| 2426 |
+
|
| 2427 |
+
def ready_to_flush(self) -> bool:
|
| 2428 |
+
"""
|
| 2429 |
+
Check whether the backend is requesting the scheduler to flush the generated kernel.
|
| 2430 |
+
If not supported, please return False.
|
| 2431 |
+
"""
|
| 2432 |
+
return False
|
| 2433 |
+
|
| 2434 |
+
def flush(self):
|
| 2435 |
+
"""
|
| 2436 |
+
Flush the generated kernel and python wrapper code to the source code file.
|
| 2437 |
+
"""
|
| 2438 |
+
raise NotImplementedError()
|
| 2439 |
+
|
| 2440 |
+
def benchmark_fused_nodes(self, nodes):
|
| 2441 |
+
"""
|
| 2442 |
+
Benchmark fused list of nodes and return the execution time
|
| 2443 |
+
in milliseconds on randomly generated inputs.
|
| 2444 |
+
"""
|
| 2445 |
+
raise NotImplementedError()
|
vila/lib/python3.10/site-packages/torch/_inductor/select_algorithm.py
ADDED
|
@@ -0,0 +1,1156 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import builtins
|
| 2 |
+
import functools
|
| 3 |
+
import inspect
|
| 4 |
+
import itertools
|
| 5 |
+
import logging
|
| 6 |
+
import operator
|
| 7 |
+
import sys
|
| 8 |
+
import textwrap
|
| 9 |
+
import time
|
| 10 |
+
from concurrent.futures import ThreadPoolExecutor
|
| 11 |
+
from io import StringIO
|
| 12 |
+
|
| 13 |
+
from typing import Any, Callable, Dict, List, Optional, Union
|
| 14 |
+
from unittest.mock import patch
|
| 15 |
+
|
| 16 |
+
import sympy
|
| 17 |
+
|
| 18 |
+
import torch
|
| 19 |
+
from torch._dynamo.testing import rand_strided
|
| 20 |
+
from torch._dynamo.utils import counters, identity, preserve_rng_state
|
| 21 |
+
|
| 22 |
+
from . import config, ir
|
| 23 |
+
from .autotune_process import TensorMeta, TritonBenchmarkRequest
|
| 24 |
+
from .codecache import code_hash, PersistentCache, PyCodeCache
|
| 25 |
+
from .codegen.common import (
|
| 26 |
+
ChoiceCaller,
|
| 27 |
+
IndentedBuffer,
|
| 28 |
+
KernelTemplate,
|
| 29 |
+
PrimitiveInfoType,
|
| 30 |
+
)
|
| 31 |
+
from .codegen.triton import (
|
| 32 |
+
gen_common_triton_imports,
|
| 33 |
+
texpr,
|
| 34 |
+
TritonKernel,
|
| 35 |
+
TritonPrinter,
|
| 36 |
+
TritonScheduling,
|
| 37 |
+
)
|
| 38 |
+
from .codegen.triton_utils import config_of, signature_to_meta
|
| 39 |
+
from .exc import CUDACompileError
|
| 40 |
+
from .utils import (
|
| 41 |
+
do_bench,
|
| 42 |
+
get_dtype_size,
|
| 43 |
+
Placeholder,
|
| 44 |
+
sympy_dot,
|
| 45 |
+
sympy_product,
|
| 46 |
+
unique,
|
| 47 |
+
)
|
| 48 |
+
from .virtualized import V
|
| 49 |
+
|
| 50 |
+
log = logging.getLogger(__name__)
|
| 51 |
+
|
| 52 |
+
# correctness checks struggle with fp16/tf32
|
| 53 |
+
VERIFY: Dict[str, Any] = dict()
|
| 54 |
+
PRINT_AUTOTUNE = True
|
| 55 |
+
DEBUG = False
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
class KernelNamespace:
|
| 59 |
+
pass
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
# these objects are imported from the generated wrapper code
|
| 63 |
+
extern_kernels = KernelNamespace()
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
class PartialRender:
|
| 67 |
+
"""
|
| 68 |
+
Some parts of a template need to be generated at the end, but
|
| 69 |
+
inserted into the template at the start. This allows doing a bunch
|
| 70 |
+
of replacements after the initial render.
|
| 71 |
+
"""
|
| 72 |
+
|
| 73 |
+
def __init__(self, code, replacement_hooks):
|
| 74 |
+
super().__init__()
|
| 75 |
+
self.code = code
|
| 76 |
+
self.replacement_hooks = replacement_hooks
|
| 77 |
+
|
| 78 |
+
def finalize(self):
|
| 79 |
+
code = self.code
|
| 80 |
+
assert code is not None, "can only be called once"
|
| 81 |
+
self.code = None
|
| 82 |
+
for key, fn in self.replacement_hooks.items():
|
| 83 |
+
code = code.replace(key, fn())
|
| 84 |
+
return code
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
class TritonTemplateKernel(TritonKernel):
|
| 88 |
+
def __init__(
|
| 89 |
+
self,
|
| 90 |
+
kernel_name,
|
| 91 |
+
input_nodes,
|
| 92 |
+
output_node,
|
| 93 |
+
defines,
|
| 94 |
+
num_stages,
|
| 95 |
+
num_warps,
|
| 96 |
+
grid_fn,
|
| 97 |
+
meta,
|
| 98 |
+
call_sizes,
|
| 99 |
+
use_jit=True,
|
| 100 |
+
prefix_args=0,
|
| 101 |
+
suffix_args=0,
|
| 102 |
+
epilogue_fn=identity,
|
| 103 |
+
*,
|
| 104 |
+
index_dtype,
|
| 105 |
+
):
|
| 106 |
+
super().__init__(
|
| 107 |
+
sympy_product(output_node.get_size()),
|
| 108 |
+
sympy.Integer(1),
|
| 109 |
+
index_dtype=index_dtype,
|
| 110 |
+
)
|
| 111 |
+
self.input_nodes = input_nodes
|
| 112 |
+
self.output_node = output_node
|
| 113 |
+
self.named_input_nodes = {}
|
| 114 |
+
self.defines = defines
|
| 115 |
+
self.kernel_name = kernel_name
|
| 116 |
+
self.template_mask = None
|
| 117 |
+
self.use_jit = use_jit
|
| 118 |
+
self.num_stages = num_stages
|
| 119 |
+
self.num_warps = num_warps
|
| 120 |
+
self.grid_fn = grid_fn
|
| 121 |
+
self.meta = meta
|
| 122 |
+
self.call_sizes = call_sizes
|
| 123 |
+
# for templates with fixed epilogues
|
| 124 |
+
self.prefix_args = prefix_args
|
| 125 |
+
self.suffix_args = suffix_args
|
| 126 |
+
self.epilogue_fn = epilogue_fn
|
| 127 |
+
self.render_hooks = dict()
|
| 128 |
+
self.triton_meta: Optional[Dict[str, object]] = None
|
| 129 |
+
|
| 130 |
+
def need_numel_args(self):
|
| 131 |
+
return False
|
| 132 |
+
|
| 133 |
+
def estimate_kernel_num_bytes(self):
|
| 134 |
+
"""
|
| 135 |
+
Estimate the total number of bytes this kernel takes.
|
| 136 |
+
For in/out nodes, sizes are counted twice: once for reading and
|
| 137 |
+
once for writing.
|
| 138 |
+
"""
|
| 139 |
+
ninplace_args = len(unique(self.args.inplace_buffers.values()))
|
| 140 |
+
num_bytes = []
|
| 141 |
+
for i, inp in enumerate(itertools.chain(self.input_nodes, (self.output_node,))):
|
| 142 |
+
size = V.graph.sizevars.size_hints(inp.get_size())
|
| 143 |
+
numel = functools.reduce(operator.mul, size)
|
| 144 |
+
dtype_size = get_dtype_size(inp.get_dtype())
|
| 145 |
+
num_bytes.append(numel * dtype_size * (1 + int(i < ninplace_args)))
|
| 146 |
+
return sum(num_bytes)
|
| 147 |
+
|
| 148 |
+
def jit_lines(self):
|
| 149 |
+
if self.use_jit:
|
| 150 |
+
return "@triton.jit"
|
| 151 |
+
|
| 152 |
+
argdefs, _, signature = self.args.python_argdefs()
|
| 153 |
+
triton_meta = {
|
| 154 |
+
"signature": signature_to_meta(signature, size_dtype=self.index_dtype),
|
| 155 |
+
"device": V.graph.scheduler.current_device.index,
|
| 156 |
+
"device_type": V.graph.scheduler.current_device.type,
|
| 157 |
+
"constants": {},
|
| 158 |
+
}
|
| 159 |
+
triton_meta["configs"] = [config_of(signature)]
|
| 160 |
+
for arg_num in triton_meta["configs"][0].equal_to_1: # type: ignore[index]
|
| 161 |
+
triton_meta["constants"][arg_num] = 1 # type: ignore[index]
|
| 162 |
+
self.triton_meta = triton_meta
|
| 163 |
+
|
| 164 |
+
inductor_meta = {
|
| 165 |
+
"kernel_name": str(Placeholder.DESCRIPTIVE_NAME),
|
| 166 |
+
"backend_hash": torch.utils._triton.triton_hash_with_backend(),
|
| 167 |
+
}
|
| 168 |
+
if config.profile_bandwidth or config.benchmark_kernel:
|
| 169 |
+
num_gb = self.estimate_kernel_num_bytes() / 1e9
|
| 170 |
+
inductor_meta["kernel_num_gb"] = num_gb
|
| 171 |
+
return f"""
|
| 172 |
+
@triton_heuristics.template(
|
| 173 |
+
num_stages={self.num_stages},
|
| 174 |
+
num_warps={self.num_warps},
|
| 175 |
+
triton_meta={triton_meta!r},
|
| 176 |
+
inductor_meta={inductor_meta!r},
|
| 177 |
+
)
|
| 178 |
+
@triton.jit
|
| 179 |
+
"""
|
| 180 |
+
|
| 181 |
+
def def_kernel(self, *argnames):
|
| 182 |
+
"""
|
| 183 |
+
Hook called from template code to generate function def and
|
| 184 |
+
needed args.
|
| 185 |
+
"""
|
| 186 |
+
assert all(isinstance(x, str) for x in argnames)
|
| 187 |
+
renames = IndentedBuffer(initial_indent=1)
|
| 188 |
+
|
| 189 |
+
named_args = self.input_nodes[
|
| 190 |
+
self.prefix_args : len(self.input_nodes) - self.suffix_args
|
| 191 |
+
]
|
| 192 |
+
|
| 193 |
+
assert len(argnames) == len(named_args), (
|
| 194 |
+
len(argnames),
|
| 195 |
+
len(named_args),
|
| 196 |
+
self.prefix_args,
|
| 197 |
+
len(self.input_nodes),
|
| 198 |
+
)
|
| 199 |
+
|
| 200 |
+
for input_node in self.input_nodes[: self.prefix_args]:
|
| 201 |
+
# get args in correct order
|
| 202 |
+
self.args.input(input_node.get_name())
|
| 203 |
+
|
| 204 |
+
for name, input_node in zip(argnames, named_args):
|
| 205 |
+
arg_name = f"arg_{name}"
|
| 206 |
+
self.named_input_nodes[name] = input_node
|
| 207 |
+
self.args.input_buffers[input_node.get_name()] = arg_name
|
| 208 |
+
|
| 209 |
+
# The args may be duplicated, so renaming must be after args are de-duplicated.
|
| 210 |
+
for name in argnames:
|
| 211 |
+
input_node = self.named_input_nodes[name]
|
| 212 |
+
arg_name = self.args.input_buffers[input_node.get_name()]
|
| 213 |
+
if input_node.get_layout().offset == 0:
|
| 214 |
+
renames.writeline(f"{name} = {arg_name}")
|
| 215 |
+
else:
|
| 216 |
+
offset = texpr(self.rename_indexing(input_node.get_layout().offset))
|
| 217 |
+
renames.writeline(f"{name} = {arg_name} + {offset}")
|
| 218 |
+
|
| 219 |
+
for input_node in self.input_nodes[len(self.input_nodes) - self.suffix_args :]:
|
| 220 |
+
# get args in correct order
|
| 221 |
+
self.args.input(input_node.get_name())
|
| 222 |
+
|
| 223 |
+
def hook():
|
| 224 |
+
# python_argdefs() cannot be run until after the rest of the template lazily adds more args
|
| 225 |
+
arg_defs, *_ = self.args.python_argdefs()
|
| 226 |
+
code = IndentedBuffer()
|
| 227 |
+
code.splice(gen_common_triton_imports())
|
| 228 |
+
code.splice(self.jit_lines())
|
| 229 |
+
code.writeline(f"def {self.kernel_name}({', '.join(arg_defs)}):")
|
| 230 |
+
with code.indent():
|
| 231 |
+
code.splice(self.defines)
|
| 232 |
+
code.splice(renames.getvalue())
|
| 233 |
+
return code.getvalue()
|
| 234 |
+
|
| 235 |
+
assert "<DEF_KERNEL>" not in self.render_hooks
|
| 236 |
+
self.render_hooks["<DEF_KERNEL>"] = hook
|
| 237 |
+
return "<DEF_KERNEL>"
|
| 238 |
+
|
| 239 |
+
def size(self, name: str, index: int):
|
| 240 |
+
"""
|
| 241 |
+
Hook called from template code to get the size of an arg.
|
| 242 |
+
Will add needed args to pass it in if it is dynamic.
|
| 243 |
+
"""
|
| 244 |
+
assert isinstance(index, int)
|
| 245 |
+
if name is None:
|
| 246 |
+
val = self.output_node.get_size()[index]
|
| 247 |
+
else:
|
| 248 |
+
assert isinstance(name, str)
|
| 249 |
+
val = self.named_input_nodes[name].get_size()[index]
|
| 250 |
+
return texpr(self.rename_indexing(val))
|
| 251 |
+
|
| 252 |
+
def stride(self, name, index):
|
| 253 |
+
"""
|
| 254 |
+
Hook called from template code to get the stride of an arg.
|
| 255 |
+
Will add needed args to pass it in if it is dynamic.
|
| 256 |
+
"""
|
| 257 |
+
assert isinstance(index, int)
|
| 258 |
+
if name is None:
|
| 259 |
+
val = self.output_node.get_stride()[index]
|
| 260 |
+
else:
|
| 261 |
+
assert isinstance(name, str)
|
| 262 |
+
val = self.named_input_nodes[name].get_stride()[index]
|
| 263 |
+
return texpr(self.rename_indexing(val))
|
| 264 |
+
|
| 265 |
+
def store_output(self, indices, val, mask):
|
| 266 |
+
"""
|
| 267 |
+
Hook called from template code to store the final output
|
| 268 |
+
(if the buffer hasn't been optimized away), then append any
|
| 269 |
+
epilogue fusions.
|
| 270 |
+
"""
|
| 271 |
+
assert isinstance(indices, (list, tuple))
|
| 272 |
+
assert isinstance(val, str)
|
| 273 |
+
assert isinstance(mask, str)
|
| 274 |
+
assert self.template_mask is None
|
| 275 |
+
indices = list(map(TritonPrinter.paren, indices))
|
| 276 |
+
index_symbols = [sympy.Symbol(x) for x in indices]
|
| 277 |
+
lengths = [V.graph.sizevars.simplify(s) for s in self.output_node.get_size()]
|
| 278 |
+
assert len(indices) == len(lengths)
|
| 279 |
+
|
| 280 |
+
# glue to make generated code use same indexing from template
|
| 281 |
+
for name, range_tree_entry in zip(
|
| 282 |
+
indices, self.range_trees[0].construct_entries(lengths)
|
| 283 |
+
):
|
| 284 |
+
range_tree_entry.set_name(name)
|
| 285 |
+
contiguous_index = sympy_dot(
|
| 286 |
+
ir.FlexibleLayout.contiguous_strides(lengths), index_symbols
|
| 287 |
+
)
|
| 288 |
+
contiguous_index = self.rename_indexing(contiguous_index)
|
| 289 |
+
self.body.writeline("xindex = " + texpr(contiguous_index))
|
| 290 |
+
self.range_trees[0].lookup(sympy.Integer(1), sympy_product(lengths)).set_name(
|
| 291 |
+
"xindex"
|
| 292 |
+
)
|
| 293 |
+
self.template_mask = mask
|
| 294 |
+
self.template_indices = indices
|
| 295 |
+
output_index = self.output_node.get_layout().make_indexer()(index_symbols)
|
| 296 |
+
output_index = self.rename_indexing(output_index)
|
| 297 |
+
if output_index == contiguous_index:
|
| 298 |
+
output_index = sympy.Symbol("xindex")
|
| 299 |
+
|
| 300 |
+
epilogue_args = [val]
|
| 301 |
+
for input_node in itertools.chain(
|
| 302 |
+
self.input_nodes[: self.prefix_args],
|
| 303 |
+
self.input_nodes[len(self.input_nodes) - self.suffix_args :],
|
| 304 |
+
):
|
| 305 |
+
input_node.freeze_layout()
|
| 306 |
+
epilogue_args.append(input_node.make_loader()(index_symbols))
|
| 307 |
+
|
| 308 |
+
V.ops.store(
|
| 309 |
+
self.output_node.get_name(),
|
| 310 |
+
output_index,
|
| 311 |
+
self.epilogue_fn(*epilogue_args),
|
| 312 |
+
)
|
| 313 |
+
self.codegen_body()
|
| 314 |
+
|
| 315 |
+
def hook():
|
| 316 |
+
# more stuff might have been added since the codegen_body above
|
| 317 |
+
self.codegen_body()
|
| 318 |
+
return textwrap.indent(self.body.getvalue(), " ").strip()
|
| 319 |
+
|
| 320 |
+
assert "<STORE_OUTPUT>" not in self.render_hooks
|
| 321 |
+
self.render_hooks["<STORE_OUTPUT>"] = hook
|
| 322 |
+
return "<STORE_OUTPUT>"
|
| 323 |
+
|
| 324 |
+
def render(self, template, kwargs):
|
| 325 |
+
return PartialRender(
|
| 326 |
+
template.render(**self.template_env(), **kwargs),
|
| 327 |
+
self.render_hooks,
|
| 328 |
+
)
|
| 329 |
+
|
| 330 |
+
def make_load(self, name, indices, mask):
|
| 331 |
+
"""
|
| 332 |
+
Optional helper called from template code to generate the code
|
| 333 |
+
needed to load from an tensor.
|
| 334 |
+
"""
|
| 335 |
+
assert isinstance(indices, (list, tuple))
|
| 336 |
+
assert isinstance(name, str)
|
| 337 |
+
assert isinstance(mask, str)
|
| 338 |
+
stride = self.named_input_nodes[name].get_stride()
|
| 339 |
+
indices = list(map(TritonPrinter.paren, indices))
|
| 340 |
+
assert len(indices) == len(stride)
|
| 341 |
+
index = " + ".join(
|
| 342 |
+
f"{texpr(self.rename_indexing(s))} * {i}" for s, i in zip(stride, indices)
|
| 343 |
+
)
|
| 344 |
+
return f"tl.load({name} + ({index}), {mask})"
|
| 345 |
+
|
| 346 |
+
def template_env(self):
|
| 347 |
+
"""
|
| 348 |
+
Generate the namespace visible in the template.
|
| 349 |
+
"""
|
| 350 |
+
return {
|
| 351 |
+
fn.__name__: fn
|
| 352 |
+
for fn in [
|
| 353 |
+
self.def_kernel,
|
| 354 |
+
self.size,
|
| 355 |
+
self.stride,
|
| 356 |
+
self.store_output,
|
| 357 |
+
self.make_load,
|
| 358 |
+
]
|
| 359 |
+
}
|
| 360 |
+
|
| 361 |
+
def indexing(
|
| 362 |
+
self,
|
| 363 |
+
index: sympy.Expr,
|
| 364 |
+
*,
|
| 365 |
+
dense_indexing=False,
|
| 366 |
+
copy_shape=None,
|
| 367 |
+
override_mask=None,
|
| 368 |
+
block_ptr=False,
|
| 369 |
+
):
|
| 370 |
+
"""
|
| 371 |
+
Override the default indexing to use our custom mask and force
|
| 372 |
+
dense indexing.
|
| 373 |
+
"""
|
| 374 |
+
return super().indexing(
|
| 375 |
+
index,
|
| 376 |
+
dense_indexing=False,
|
| 377 |
+
copy_shape=self.template_mask,
|
| 378 |
+
override_mask=self.template_mask,
|
| 379 |
+
block_ptr=block_ptr,
|
| 380 |
+
)
|
| 381 |
+
|
| 382 |
+
def initialize_range_tree(self, pid_cache):
|
| 383 |
+
super().initialize_range_tree(pid_cache)
|
| 384 |
+
# ignore default codegen
|
| 385 |
+
self.body.clear()
|
| 386 |
+
self.indexing_code.clear()
|
| 387 |
+
|
| 388 |
+
    def call_kernel(self, name: str, node: Optional[ir.IRNode] = None):
        """Emit the call site for this template kernel into the wrapper code.

        Two paths: in the cpp_wrapper case the launch grid is computed
        eagerly here via the Python grid function (the C++ wrapper cannot
        evaluate it); otherwise the grid function is referenced by module
        path and evaluated at runtime by the generated Python code.
        """
        wrapper = V.graph.wrapper_code
        _, call_args, _ = self.args.python_argdefs()
        call_args = [str(a) for a in call_args]

        for i in range(len(call_args)):
            if V.graph.is_unspec_arg(call_args[i]):
                call_args[i] = call_args[i] + ".item()"
            # NOTE(review): every entry was stringified above, so this
            # isinstance check can never be True — looks like dead code;
            # presumably the texpr conversion was meant to run before the
            # str() pass. Confirm before removing.
            if isinstance(call_args[i], sympy.Symbol):
                call_args[i] = texpr(call_args[i])

        if V.graph.cpp_wrapper:
            # In the cpp_wrapper case, we have to compute CUDA launch grid at runtime
            # if any dynamic dimension is involved. We rely on the Python version
            # of the grid function to generate those grid configs, which may contain
            # symbolic values. The wrapper will use cexpr to print out C++ code
            # appropriately for the grid configs.
            grid_args = [V.graph.sizevars.simplify(s) for s in self.call_sizes] + [
                self.meta
            ]
            grid = self.grid_fn(*grid_args)

            wrapper.generate_kernel_call(
                name,
                call_args,
                device_index=V.graph.scheduler.current_device.index,
                grid=grid,
                triton_meta=self.triton_meta,
            )
        else:
            stream_name = wrapper.write_get_raw_stream(
                V.graph.scheduler.current_device.index
            )

            # The grid function is imported and evaluated at runtime.
            wrapper.add_import_once(f"import {self.grid_fn.__module__}")
            meta = wrapper.add_meta_once(self.meta)

            grid_call = [
                texpr(V.graph.sizevars.simplify(s)) for s in self.call_sizes
            ] + [meta]
            grid_call = f"{self.grid_fn.__module__}.{self.grid_fn.__name__}({', '.join(grid_call)})"
            wrapper.writeline(
                f"{name}.run({', '.join(call_args)}, grid={grid_call}, stream={stream_name})"
            )
|
| 432 |
+
|
| 433 |
+
|
| 434 |
+
@functools.lru_cache(None)
|
| 435 |
+
def _jinja2_env():
|
| 436 |
+
try:
|
| 437 |
+
import jinja2
|
| 438 |
+
|
| 439 |
+
return jinja2.Environment(
|
| 440 |
+
undefined=jinja2.StrictUndefined,
|
| 441 |
+
)
|
| 442 |
+
except ImportError:
|
| 443 |
+
return None
|
| 444 |
+
|
| 445 |
+
|
| 446 |
+
class TritonTemplate(KernelTemplate):
    """A jinja2-sourced kernel template that can be instantiated into
    concrete Triton kernel candidates for autotuning."""

    # Monotonically increasing counter used to make candidate names unique.
    index_counter = itertools.count()
    # Process-wide registry of templates, keyed by unique name.
    all_templates: Dict[str, "TritonTemplate"] = dict()

    def __init__(self, name: str, grid: Any, source: str, debug=False):
        """Register a new template.

        Args:
            name: unique template name (duplicates are asserted against).
            grid: callable mapping (*sizes, meta) -> launch grid.
            source: jinja2 source text for the kernel.
            debug: when True, print the generated code in ``generate()``.
        """
        super().__init__(name)
        self.grid = grid
        self.template = self._template_from_string(source)
        assert name not in self.all_templates, "duplicate template name"
        self.all_templates[name] = self
        self.debug = debug

    def generate(
        self,
        input_nodes,
        layout,
        num_stages,
        num_warps,
        prefix_args=0,
        suffix_args=0,
        epilogue_fn=identity,
        **kwargs,
    ):
        """Render this template into a ``TritonTemplateCaller`` candidate.

        Returns ``None`` when rendering hits a sympy ZeroDivisionError
        (see TODO below). Raises NotImplementedError when the problem needs
        64-bit indexing.
        """
        assert self.template, "requires jinja2"
        # Each kwarg becomes a tl.constexpr definition in the kernel body.
        defines = StringIO()
        for name, val in kwargs.items():
            defines.write(f"    {name} : tl.constexpr = {val}\n")
        defines = defines.getvalue()

        fake_out = ir.Buffer("buf_out", layout)
        kernel_name = f"triton_{self.name}"

        numel = sympy_product(layout.size)
        buffers = itertools.chain(input_nodes, (fake_out,))
        if not TritonScheduling.can_use_32bit_indexing(numel, buffers):
            raise NotImplementedError(
                "64-bit indexing is not yet implemented for triton templates"
            )

        # Shared between the JIT render below and make_kernel_render later.
        kernel_options = dict(
            input_nodes=input_nodes,
            defines=defines,
            num_stages=num_stages,
            num_warps=num_warps,
            grid_fn=self.grid,
            meta=kwargs,
            call_sizes=layout.size,
            prefix_args=prefix_args,
            suffix_args=suffix_args,
            epilogue_fn=epilogue_fn,
            index_dtype="tl.int32",
        )
        # Render once with use_jit=True to produce standalone benchmarkable code.
        with patch.object(
            V.graph, "get_dtype", self._fake_get_dtype(fake_out)
        ), TritonTemplateKernel(
            kernel_name=kernel_name,
            output_node=fake_out,
            use_jit=True,
            **kernel_options,
        ) as kernel:
            try:
                code = kernel.render(self.template, kwargs).finalize()
            except ZeroDivisionError:
                # TODO(nmacchioni): fix sympy division by zero
                return None
            if self.debug:
                print("Generated Code:\n", code)
            # Cache key suffix encoding the config, e.g. "BLOCK_M=64-num_warps=4-".
            extra = (
                "-".join(
                    [
                        *[
                            f"{kwarg}={repr(kwargs[kwarg])}"
                            for kwarg in sorted(kwargs.keys())
                        ],
                        f"num_stages={num_stages}",
                        f"num_warps={num_warps}",
                    ]
                )
                + "-"
            )
            mod = PyCodeCache.load(code, extra)
            _, call_args, _ = kernel.args.python_argdefs()

        # Sanity-check the generated arg order: inputs first, then the output.
        expected_args = list(unique(x.get_name() for x in input_nodes))
        expected_args.extend([fake_out.get_name()])
        assert list(call_args)[: len(expected_args)] == expected_args, (
            call_args,
            expected_args,
        )
        # Remaining args are size symbols; resolve to concrete hints for benchmarking.
        extra_args = V.graph.sizevars.size_hints(
            map(sympy.expand, call_args[len(expected_args) :]),
            fallback=config.unbacked_symint_fallback,
        )

        kernel_hash_name = f"triton_{self.name}_{next(self.index_counter)}"

        def make_kernel_render(out_node):
            # Re-render at lowering time against the real output node.
            kernel = TritonTemplateKernel(
                kernel_name=str(Placeholder.KERNEL_NAME),
                output_node=out_node,
                use_jit=False,
                **kernel_options,
            )
            render = functools.partial(
                kernel.render,
                self.template,
                kwargs,
            )
            return kernel, render

        # create the BenchmarkRequest
        assert mod.__file__ is not None
        grid = self.grid(
            *V.graph.sizevars.size_hints(
                layout.size,
                fallback=config.unbacked_symint_fallback,
            ),
            kwargs,
        )
        bmreq = TritonBenchmarkRequest(
            module_path=mod.__file__,
            module_cache_key=mod.key,
            kernel_name=kernel_name,
            grid=grid,
            extra_args=extra_args,
            num_stages=num_stages,
            num_warps=num_warps,
            matrix_instr_nonkdim=kwargs.get("matrix_instr_nonkdim", 0),
            input_tensor_meta=TensorMeta.from_irnodes(input_nodes),
            output_tensor_meta=TensorMeta.from_irnodes(layout),
        )

        return TritonTemplateCaller(
            kernel_hash_name,
            input_nodes,
            layout,
            make_kernel_render,
            extra.strip("-").replace("-", ", "),
            bmreq,
            log_info={
                "tile_shape": str(
                    (
                        kwargs.get("BLOCK_M", -1),
                        kwargs.get("BLOCK_K", -1),
                        kwargs.get("BLOCK_N", -1),
                    )
                ),
                "num_stages": num_stages,
                "num_warps": num_warps,
                "allow_tf32": str(kwargs.get("ALLOW_TF32", None)),
                "acc_type": str(kwargs.get("ACC_TYPE", None)),
            },
        )
|
| 599 |
+
|
| 600 |
+
|
| 601 |
+
class ExternKernelChoice:
    """Wraps an external (e.g. ATen) kernel so it can compete against
    generated Triton candidates during autotuning.

    Constructing an instance registers the kernel on the module-level
    ``extern_kernels`` namespace so generated code can call it by name.
    """

    def __init__(
        self,
        kernel,
        cpp_kernel=None,
        *,
        name=None,
        has_out_variant=True,
        op_overload=None,
        use_fallback_kernel=False,
    ):
        super().__init__()
        name = name or kernel.__name__
        assert callable(kernel)
        assert not hasattr(extern_kernels, name), "duplicate extern kernel"
        self.name = name
        self.cpp_kernel_name = cpp_kernel
        self.has_out_variant = has_out_variant
        # Side effect: expose the kernel as extern_kernels.<name> for codegen.
        setattr(extern_kernels, name, kernel)
        self.op_overload = op_overload
        self.use_fallback_kernel = use_fallback_kernel

    def to_callable(self):
        """Return the registered callable."""
        return getattr(extern_kernels, self.name)

    def call_name(self):
        """Name used in generated Python code to invoke this kernel."""
        return f"extern_kernels.{self.name}"

    # NOTE(review): lru_cache on an instance method keeps the instance alive
    # for the cache's lifetime; presumably acceptable here since instances
    # are process-lifetime registrations anyway — confirm.
    @functools.lru_cache(None)
    def hash_key(self):
        """Stable hash of the kernel identity (name, module, source if available)."""
        fn = self.to_callable()
        parts = [
            self.name,
            getattr(fn, "__name__", ""),
            getattr(fn, "__module__", ""),
        ]
        try:
            parts.append(inspect.getsource(fn))
        except Exception:
            # Source may be unavailable (builtins / C extensions); best-effort.
            pass
        return code_hash("-".join(parts))

    def bind(
        self,
        input_nodes,
        layout,
        ordered_kwargs_for_cpp_kernel=(),
        **kwargs,
    ):
        """Bind concrete inputs/layout, producing an ExternKernelCaller candidate."""
        self.ordered_kwargs_for_cpp_kernel = ordered_kwargs_for_cpp_kernel
        return ExternKernelCaller(
            self, input_nodes, layout, kwargs, has_out_variant=self.has_out_variant
        )
|
| 654 |
+
|
| 655 |
+
|
| 656 |
+
class TritonTemplateCaller(ChoiceCaller):
    """A concrete autotuning candidate produced by ``TritonTemplate.generate``.

    Holds everything needed to (a) benchmark the generated kernel via
    ``bmreq`` and (b) materialize it into the graph via ``output_node()``.
    """

    def __init__(
        self,
        name,
        input_nodes,
        layout,
        make_kernel_render,
        debug_extra,
        bmreq,
        log_info: Optional[
            Dict[str, Union[PrimitiveInfoType, List[PrimitiveInfoType]]]
        ] = None,
    ):
        super().__init__(name, input_nodes, layout)
        # Callable: out_node -> (kernel, render); invoked at lowering time.
        self.make_kernel_render = make_kernel_render
        # Human-readable config summary, e.g. "BLOCK_M=64, num_warps=4".
        self.debug_extra = debug_extra
        self.bmreq: TritonBenchmarkRequest = bmreq
        if log_info is None:
            log_info = {}
        self.log_info: Dict[str, Any] = log_info
        self.log_info.update(
            {
                "backend": "Triton",
                "grid": str(self.bmreq.grid),
                "num_stages": self.bmreq.num_stages,
                "num_warps": self.bmreq.num_warps,
            }
        )

    def benchmark(self, *args, out):
        """Benchmark through the stored TritonBenchmarkRequest."""
        assert self.bmreq is not None
        return self.bmreq.benchmark(*args, output_tensor=out)

    def __str__(self):
        return f"TritonTemplateCaller({self.bmreq.module_path}, {self.debug_extra})"

    def call_name(self):
        return f"template_kernels.{self.name}"

    def hash_key(self):
        # Drop the trailing per-instance counter so equivalent configs
        # share a cache key across runs.
        return "-".join(
            [
                self.name.rsplit("_", 1)[0],
                self.bmreq.module_cache_key,
            ]
        )

    def output_node(self):
        """Materialize the chosen kernel as an IR buffer in the graph."""
        return ir.TensorBox.create(
            ir.TritonTemplateBuffer(
                layout=self.layout,
                inputs=self.input_nodes,
                make_kernel_render=self.make_kernel_render,
            )
        )

    def info_dict(self) -> Dict[str, Union[PrimitiveInfoType, List[PrimitiveInfoType]]]:
        """Information returned here is logged to the autotune log file when that is enabled."""
        return self.log_info
|
| 715 |
+
|
| 716 |
+
|
| 717 |
+
class ExternKernelCaller(ChoiceCaller):
    """Autotuning candidate backed by an external (e.g. ATen) kernel."""

    def __init__(
        self,
        choice: ExternKernelChoice,
        input_nodes,
        layout,
        kwargs=None,
        *,
        has_out_variant=True,
    ):
        super().__init__(choice.name, input_nodes, layout)
        self.choice = choice
        self.kwargs = kwargs or {}
        # Whether the underlying kernel accepts an out= argument.
        self.has_out_variant = has_out_variant

    def __str__(self):
        return f"ExternKernelCaller({self.choice.call_name()})"

    def benchmark(self, *args, out):
        """Benchmark the kernel.

        Without an out= variant the kernel allocates its own output, which is
        size/stride-checked and copied into ``out`` for correctness checking.
        """
        if self.has_out_variant:
            return super().benchmark(*args, out=out)
        else:
            algo = self.to_callable()
            out_new = algo(*args)
            torch._C._dynamo.guards.assert_size_stride(
                out_new, tuple(out.size()), tuple(out.stride())
            )
            out.copy_(out_new)  # for correctness checking
            return do_bench(lambda: algo(*args))

    def to_callable(self):
        """Return the kernel, with bound kwargs partially applied if any."""
        fn = self.choice.to_callable()
        if self.kwargs:
            return functools.partial(fn, **self.kwargs)
        else:
            return fn

    def hash_key(self):
        # Key includes the bound kwargs so differently-configured bindings
        # of the same kernel are cached separately.
        return "-".join(
            [
                self.choice.name,
                *[
                    f"{kwarg}={repr(self.kwargs[kwarg])}"
                    for kwarg in sorted(self.kwargs.keys())
                ],
                self.choice.hash_key(),
            ]
        )

    def output_node(self):
        """Materialize the external kernel call as an IR node."""
        if config.abi_compatible and self.choice.use_fallback_kernel:
            assert (
                self.choice.op_overload is not None
            ), "Please provide an op_overload to use ir.FallbackKernel"
            inner = ir.FallbackKernel.create(
                self.choice.op_overload, *self.input_nodes, **self.kwargs
            )
        else:
            cls = ir.ExternKernelOut if self.has_out_variant else ir.ExternKernelAlloc
            inner = cls(
                layout=self.layout,
                inputs=self.input_nodes,
                python_kernel_name=self.choice.call_name(),
                cpp_kernel_name=self.choice.cpp_kernel_name,
                ordered_kwargs_for_cpp_kernel=self.choice.ordered_kwargs_for_cpp_kernel,
                op_overload=self.choice.op_overload,
                kwargs=self.kwargs,
            )

        return ir.TensorBox.create(inner)

    def info_dict(self) -> Dict[str, Union[PrimitiveInfoType, List[PrimitiveInfoType]]]:
        """Information returned here is logged to the autotune log file when that is enabled."""
        return {
            "backend": "extern",
            "kernel_call_name": self.choice.call_name(),
        }
|
| 794 |
+
|
| 795 |
+
|
| 796 |
+
class ErrorFromChoice(RuntimeError):
    """Raised when benchmarking a candidate choice fails; records which
    choice failed and the inputs used."""

    def __init__(self, msg, choice: ChoiceCaller, inputs_str):
        detail = f"{msg}\nFrom choice {choice}\n{inputs_str}"
        super().__init__(detail)
        self.choice = choice
|
| 801 |
+
|
| 802 |
+
|
| 803 |
+
class AlgorithmSelectorCache(PersistentCache):
    """Benchmarks candidate kernels and picks the fastest, with results
    persisted via the PersistentCache base class."""

    def __call__(
        self,
        name,
        choices: List[ChoiceCaller],
        input_nodes,
        layout,
        # optional dict mapping arg indices to the functions
        # generating a torch.Tensor for that input from the
        # corresponding ir.Buffer. if passed for a given
        # arg, the function will be called instead of
        # generating a random torch.Tensor for benchmarking.
        input_gen_fns: Optional[Dict[int, Callable[[ir.Buffer], torch.Tensor]]] = None,
        precompilation_timeout_seconds: int = 60 * 60,
    ):
        """Select the fastest choice for ``name`` and return its output node."""
        from .codegen.cuda.cuda_kernel import CUDATemplateCaller

        # TODO(nmacchioni): remove once CI tests are fixed
        choices = [choice for choice in choices if choice is not None]
        if len(choices) == 0:
            raise RuntimeError(
                "No choices to select, please consider adding ATEN into max_autotune_gemm_backends "
                "config (defined in torch/_inductor/config.py) to allow at least one choice. "
            )
        log.debug("Max autotune selects from %s choices.", str(len(choices)))

        if len(choices) == 1:
            if not isinstance(choices[0], CUDATemplateCaller):
                # CUDATemplateCaller still needs to go through autotuning process to retrieve workspace size.
                return choices[0].output_node()

        # Cached so the expensive benchmark setup runs at most once; its
        # cache_info() below also tells us whether autotuning actually ran.
        @functools.lru_cache(None)
        def make_benchmark_fn():
            return self.make_benchmark_fn(choices, input_nodes, layout, input_gen_fns)

        def precompile(choices):
            # Best-effort parallel warm compile of all choices before timing.
            if (
                precompilation_timeout_seconds is None
                or precompilation_timeout_seconds <= 0
            ):
                return
            num_workers = min(
                config.compile_threads,
                torch.get_num_threads(),
                len(choices),
            )
            if num_workers <= 0:
                return
            log.info(
                "Multithreaded precompilation for %d choices using %d worker threads",
                len(choices),
                num_workers,
            )
            with ThreadPoolExecutor(max_workers=num_workers) as executor:
                futures = executor.map(
                    lambda c: c.precompile(),
                    [c for c in choices if hasattr(c, "precompile")],
                    timeout=precompilation_timeout_seconds,
                )
                try:
                    # Drain results manually so a CUDA compile failure in one
                    # choice doesn't abort precompilation of the rest.
                    iterator = iter(futures)
                    while True:
                        try:
                            next(iterator)
                        except CUDACompileError:
                            log.error(  # noqa: G201
                                "CUDA Compilation error", exc_info=True
                            )
                except TimeoutError:
                    log.warning(
                        f"Precompilation timed out after {precompilation_timeout_seconds} seconds."  # noqa: G004
                    )
                except StopIteration:
                    pass
                # NOTE(review): redundant — the `with` block already calls
                # shutdown(wait=True) on exit.
                executor.shutdown(wait=True)

        def autotune(choices):
            try:
                precompile(choices)
            except TimeoutError:
                log.warning(
                    "Precompilation phase took longer than timeout allowed. Continuing"
                )
                # NOTE(review): this `pass` is redundant after the warning.
                pass
            return make_benchmark_fn()(choices)

        if config.autotune_in_subproc:
            from .autotune_process import tuning_pool

            # do the optional warmup
            tuning_pool.initialize()

        autotune_start_ts = time.time()
        # lookup() consults the persistent cache and only calls `autotune`
        # (the expensive path) on a miss.
        timings = self.lookup(
            choices,
            name,
            repr([self.key_of(x) for x in input_nodes]),
            autotune,
        )
        autotune_elapse = time.time() - autotune_start_ts
        if timings == {} or choices[0] not in timings:
            return choices[0].output_node()

        # cache_info().currsize > 0 means make_benchmark_fn actually ran,
        # i.e. this was a fresh autotune rather than a cache hit.
        if make_benchmark_fn.cache_info().currsize:
            counters["inductor"]["select_algorithm_autotune"] += 1
        if (
            make_benchmark_fn.cache_info().currsize
            or log.getEffectiveLevel() == logging.DEBUG
            or config.trace.log_autotuning_results
        ):
            self.log_results(name, input_nodes, timings, autotune_elapse)
        selected_choice = builtins.min(timings, key=timings.__getitem__).output_node()
        log.debug("selected choice: %s", str(selected_choice))
        return selected_choice

    @classmethod
    def make_benchmark_fn(
        cls,
        choices,
        input_nodes,
        layout,
        input_gen_fns=None,
    ):
        """Build the benchmarking closure used by ``autotune`` above.

        Materializes example inputs/outputs once, then returns either an
        in-process or sub-process benchmark function depending on config.
        """
        if input_gen_fns is None:
            input_gen_fns = {}

        # de-duplicate args
        unique_example_inputs = {
            x.get_name(): input_gen_fns.get(i, cls.benchmark_example_value)(x)
            for i, x in enumerate(input_nodes)
        }
        example_inputs = list(unique_example_inputs.values())
        # Extern (aten) kernels receive views with the layout offset baked in.
        example_inputs_extern = [
            torch.as_strided(
                unique_example_inputs[input_node.get_name()],
                V.graph.sizevars.size_hints(
                    input_node.get_size(),
                    fallback=config.unbacked_symint_fallback,
                ),
                V.graph.sizevars.size_hints(
                    input_node.get_stride(),
                    fallback=config.unbacked_symint_fallback,
                ),
                V.graph.sizevars.size_hint(
                    input_node.get_layout().offset,
                    fallback=config.unbacked_symint_fallback,
                ),
            )
            for input_node in input_nodes
        ]

        out = cls.benchmark_example_value(layout)
        out_extern = torch.as_strided(
            out, out.size(), out.stride(), V.graph.sizevars.size_hint(layout.offset)
        )
        if VERIFY:
            # First choice's output becomes the reference for all others.
            choices[0].benchmark(*example_inputs_extern, out=out_extern)
            expected = out_extern.clone()

        if DEBUG:
            print(f"{len(choices)} tuning requests:")

        def debug_str():
            # Reproduce-case snippet for error reports.
            def tensor_repr(x):
                return (
                    f"torch.empty_strided({tuple(x.size())!r}, {tuple(x.stride())!r}, "
                    f"dtype={x.dtype!r}, device={x.device.type!r})"
                )

            lines = [
                "inputs = [",
            ]
            for x in example_inputs:
                lines.append(f"    {tensor_repr(x)},")
            lines += ["]", f"out = {tensor_repr(out)}", ""]
            return "\n".join(lines)

        def benchmark_choice_in_current_process(choice):
            out.zero_()
            if isinstance(choice, ExternKernelCaller):
                # aten kernels want the offset baked in for sliced tensors
                result = choice.benchmark(*example_inputs_extern, out=out_extern)
            else:
                # triton templates want the base pointer for sliced tensors
                result = choice.benchmark(*example_inputs, out=out)
            if VERIFY:
                torch.testing.assert_close(out_extern, expected, **VERIFY)
            torch.cuda.synchronize()  # shake out any CUDA errors
            return result

        def benchmark_in_current_process(choices):
            timings = {}
            for choice in choices:
                try:
                    timing = benchmark_choice_in_current_process(choice)
                except CUDACompileError as e:
                    log.warning(
                        "CUDA compilation error: \n%s. \nIgnore this choice.", str(e)
                    )
                    timing = float("inf")
                except RuntimeError as e:
                    msg = str(e)
                    if "invalid argument" in msg:
                        msg += "\n\nThis may mean this GPU is too small for max_autotune mode.\n\n"
                        log.warning(msg)
                        timing = float("inf")
                    else:
                        if "illegal memory access" in msg:
                            msg += "\n\nEither error in template or triton bug.\n"
                        raise ErrorFromChoice(msg, choice, debug_str())  # noqa: TRY200
                except AssertionError as e:
                    raise AssertionError(  # noqa: TRY200
                        f"Incorrect result from choice {choice}\n\n{e}"
                    )

                timings[choice] = timing

            return timings

        def benchmark_in_sub_process(choices):
            from . import autotune_process

            # only benchmark triton kernel in sub process for now.
            # ATen/Extern kernel are still benchmarked in the current process.
            extern = [c for c in choices if isinstance(c, ExternKernelCaller)]
            triton = [c for c in choices if not isinstance(c, ExternKernelCaller)]

            timings = benchmark_in_current_process(extern)
            timings.update(autotune_process.benchmark_in_sub_process(triton))
            return timings

        benchmark = (
            benchmark_in_sub_process
            if config.autotune_in_subproc
            else benchmark_in_current_process
        )

        return benchmark

    @staticmethod
    def log_results(
        name: str,
        input_nodes: List[ir.IRNode],
        timings: Dict[ChoiceCaller, float],
        elapse: float,
    ):
        """Log autotuning results; also prints a top-k table to stderr when
        max_autotune is enabled."""
        V.debug.log_autotuning_results(name, input_nodes, timings, elapse)
        if not (config.max_autotune or config.max_autotune_gemm) or not PRINT_AUTOTUNE:
            return
        sizes = ", ".join(
            [
                "x".join(
                    map(
                        str,
                        V.graph.sizevars.size_hints(
                            n.get_size(), fallback=config.unbacked_symint_fallback
                        ),
                    )
                )
                for n in input_nodes
            ]
        )
        # Show everything at DEBUG level, otherwise just the top 10.
        n = None if log.getEffectiveLevel() == logging.DEBUG else 10
        top_k = sorted(timings, key=timings.__getitem__)[:n]
        best = top_k[0]
        best_time = timings[best]
        sys.stderr.write(f"AUTOTUNE {name}({sizes})\n")
        for choice in top_k:
            result = timings[choice]
            if result:
                sys.stderr.write(
                    f"  {choice.name} {result:.4f} ms {best_time/result:.1%}\n"
                )
            else:
                sys.stderr.write(
                    f"  {choice.name} {result:.4f} ms <DIVIDED BY ZERO ERROR>\n"
                )

        autotune_type_str = (
            "SubProcess" if config.autotune_in_subproc else "SingleProcess"
        )
        sys.stderr.write(f"{autotune_type_str} AUTOTUNE takes {elapse:.4f} seconds\n")

    @staticmethod
    def benchmark_example_value(node):
        """
        Convert an ir.Buffer into a concrete torch.Tensor we can use for
        benchmarking.
        """
        if isinstance(node, ir.Layout):
            node = ir.Buffer("fake", node)
        # triton templates want the base tensor.
        if isinstance(node, ir.BaseView):
            node = node.unwrap_view()
        # preserve rng states to avoid the rand_strided call below changes
        # the rng states for the real model code.
        with preserve_rng_state():
            return rand_strided(
                V.graph.sizevars.size_hints(
                    node.get_size(),
                    fallback=config.unbacked_symint_fallback,
                ),
                V.graph.sizevars.size_hints(
                    node.get_stride(),
                    fallback=config.unbacked_symint_fallback,
                ),
                device=node.get_device(),
                dtype=node.get_dtype(),
                extra_size=node.layout.offset,
            )

    @staticmethod
    def key_of(node):
        """
        Extract the pieces of an ir.Buffer that we should invalidate cached
        autotuning results on.
        """
        sizevars = V.graph.sizevars
        return (
            node.get_device().type,
            str(node.get_dtype()),
            *sizevars.size_hints(
                node.get_size(),
                fallback=config.unbacked_symint_fallback,
            ),
            *sizevars.size_hints(
                node.get_stride(),
                fallback=config.unbacked_symint_fallback,
            ),
            sizevars.size_hint(
                node.get_layout().offset,
                fallback=config.unbacked_symint_fallback,
            ),
        )
|
| 1137 |
+
|
| 1138 |
+
|
| 1139 |
+
# Process-wide singleton; created lazily on first use.
_ALGORITHM_SELECTOR_CACHE: Optional[AlgorithmSelectorCache] = None


def autotune_select_algorithm(*args, **kwargs):
    """Entry point for kernel autotuning; delegates to a lazily constructed
    process-wide AlgorithmSelectorCache."""
    global _ALGORITHM_SELECTOR_CACHE
    cache = _ALGORITHM_SELECTOR_CACHE
    if cache is None:
        cache = AlgorithmSelectorCache()
        _ALGORITHM_SELECTOR_CACHE = cache
    return cache(*args, **kwargs)
|
| 1147 |
+
|
| 1148 |
+
|
| 1149 |
+
def realize_inputs(*args):
    """Realize each input IR node and require a stride-1 layout.

    A single argument returns one realized node; several return a list,
    realized recursively.
    """
    if len(args) != 1:
        return [realize_inputs(node) for node in args]
    (node,) = args
    return ir.ExternKernel.require_stride1(ir.ExternKernel.realize_input(node))
|
| 1153 |
+
|
| 1154 |
+
|
| 1155 |
+
# ensure lowering is imported so that `extern_kernels.*` is populated
|
| 1156 |
+
from . import lowering # noqa: F401
|