Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +1 -0
- llava_next/lib/python3.10/site-packages/torch/__pycache__/_namedtensor_internals.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/__pycache__/functional.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/__pycache__/quasirandom.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/__init__.py +112 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/bounds.py +101 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/codecache.py +1424 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/compile_fx.py +1284 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/config.py +449 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/cudagraph_trees.py +2137 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/debug.py +471 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/decomposition.py +500 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/dependencies.py +360 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/exc.py +87 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/fx_utils.py +30 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/graph.py +988 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/hooks.py +24 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/inductor_prims.py +73 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/lowering.py +0 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/metrics.py +52 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/optimize_indexing.py +118 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/pattern_matcher.py +1169 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/quantized_lowerings.py +15 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/scheduler.py +1749 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/select_algorithm.py +974 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/sizevars.py +576 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/test_operators.py +24 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/triton_helpers.py +182 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/triton_heuristics.py +1046 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/virtualized.py +310 -0
- vlmpy310/lib/python3.10/site-packages/pyglet/gl/__pycache__/gl.cpython-310.pyc +3 -0
- vlmpy310/lib/python3.10/site-packages/skimage/future/__init__.py +13 -0
- vlmpy310/lib/python3.10/site-packages/skimage/future/manual_segmentation.py +235 -0
- vlmpy310/lib/python3.10/site-packages/skimage/future/trainable_segmentation.py +164 -0
- vlmpy310/lib/python3.10/site-packages/skimage/segmentation/__pycache__/__init__.cpython-310.pyc +0 -0
- vlmpy310/lib/python3.10/site-packages/skimage/segmentation/__pycache__/_chan_vese.cpython-310.pyc +0 -0
- vlmpy310/lib/python3.10/site-packages/skimage/segmentation/__pycache__/_clear_border.cpython-310.pyc +0 -0
- vlmpy310/lib/python3.10/site-packages/skimage/segmentation/__pycache__/_expand_labels.cpython-310.pyc +0 -0
- vlmpy310/lib/python3.10/site-packages/skimage/segmentation/__pycache__/_felzenszwalb.cpython-310.pyc +0 -0
- vlmpy310/lib/python3.10/site-packages/skimage/segmentation/__pycache__/_join.cpython-310.pyc +0 -0
- vlmpy310/lib/python3.10/site-packages/skimage/segmentation/__pycache__/_quickshift.cpython-310.pyc +0 -0
- vlmpy310/lib/python3.10/site-packages/skimage/segmentation/__pycache__/_watershed.cpython-310.pyc +0 -0
- vlmpy310/lib/python3.10/site-packages/skimage/segmentation/__pycache__/active_contour_model.cpython-310.pyc +0 -0
- vlmpy310/lib/python3.10/site-packages/skimage/segmentation/__pycache__/boundaries.cpython-310.pyc +0 -0
- vlmpy310/lib/python3.10/site-packages/skimage/segmentation/__pycache__/morphsnakes.cpython-310.pyc +0 -0
- vlmpy310/lib/python3.10/site-packages/skimage/segmentation/__pycache__/random_walker_segmentation.cpython-310.pyc +0 -0
- vlmpy310/lib/python3.10/site-packages/skimage/segmentation/__pycache__/slic_superpixels.cpython-310.pyc +0 -0
- vlmpy310/lib/python3.10/site-packages/skimage/segmentation/_clear_border.py +109 -0
- vlmpy310/lib/python3.10/site-packages/skimage/segmentation/_felzenszwalb.py +69 -0
- vlmpy310/lib/python3.10/site-packages/skimage/segmentation/morphsnakes.py +449 -0
.gitattributes
CHANGED
|
@@ -1188,3 +1188,4 @@ llava_next/lib/python3.10/site-packages/torch/distributed/__pycache__/distribute
|
|
| 1188 |
vlmpy310/lib/python3.10/site-packages/pyglet/gl/__pycache__/gl_compat.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1189 |
vlmpy310/lib/python3.10/site-packages/pyglet/libs/win32/__pycache__/constants.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1190 |
llava_next/lib/python3.10/site-packages/torch/lib/libgomp-a34b3233.so.1 filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
| 1188 |
vlmpy310/lib/python3.10/site-packages/pyglet/gl/__pycache__/gl_compat.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1189 |
vlmpy310/lib/python3.10/site-packages/pyglet/libs/win32/__pycache__/constants.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1190 |
llava_next/lib/python3.10/site-packages/torch/lib/libgomp-a34b3233.so.1 filter=lfs diff=lfs merge=lfs -text
|
| 1191 |
+
vlmpy310/lib/python3.10/site-packages/pyglet/gl/__pycache__/gl.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
llava_next/lib/python3.10/site-packages/torch/__pycache__/_namedtensor_internals.cpython-310.pyc
ADDED
|
Binary file (4.95 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/__pycache__/functional.cpython-310.pyc
ADDED
|
Binary file (67.6 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/__pycache__/quasirandom.cpython-310.pyc
ADDED
|
Binary file (6.87 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/_inductor/__init__.py
ADDED
|
@@ -0,0 +1,112 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Any, Dict, List, Optional
|
| 2 |
+
|
| 3 |
+
import torch.fx
|
| 4 |
+
|
| 5 |
+
__all__ = ["compile", "list_mode_options", "list_options", "cudagraph_mark_step_begin"]
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
def compile(
|
| 9 |
+
gm: torch.fx.GraphModule,
|
| 10 |
+
example_inputs: List[torch.Tensor],
|
| 11 |
+
options: Optional[Dict[str, Any]] = None,
|
| 12 |
+
):
|
| 13 |
+
"""
|
| 14 |
+
Compile a given FX graph with TorchInductor. This allows compiling
|
| 15 |
+
FX graphs captured without using TorchDynamo.
|
| 16 |
+
|
| 17 |
+
Args:
|
| 18 |
+
gm: The FX graph to compile.
|
| 19 |
+
example_inputs: List of tensor inputs.
|
| 20 |
+
options: Optional dict of config options. See `torch._inductor.config`.
|
| 21 |
+
|
| 22 |
+
Returns:
|
| 23 |
+
Callable with same behavior as gm but faster.
|
| 24 |
+
"""
|
| 25 |
+
from .compile_fx import compile_fx
|
| 26 |
+
|
| 27 |
+
return compile_fx(gm, example_inputs, config_patches=options)
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
def aot_compile(
|
| 31 |
+
gm: torch.fx.GraphModule,
|
| 32 |
+
example_inputs: List[torch.Tensor],
|
| 33 |
+
options: Optional[Dict[str, Any]] = None,
|
| 34 |
+
) -> str:
|
| 35 |
+
"""
|
| 36 |
+
Ahead-of-time compile a given FX graph with TorchInductor into a shared library.
|
| 37 |
+
|
| 38 |
+
Args:
|
| 39 |
+
gm: The FX graph to compile.
|
| 40 |
+
example_inputs: List of tensor inputs.
|
| 41 |
+
options: Optional dict of config options. See `torch._inductor.config`.
|
| 42 |
+
|
| 43 |
+
Returns:
|
| 44 |
+
Path to the generated shared library
|
| 45 |
+
"""
|
| 46 |
+
from .compile_fx import compile_fx_aot
|
| 47 |
+
|
| 48 |
+
result = compile_fx_aot(
|
| 49 |
+
gm,
|
| 50 |
+
example_inputs,
|
| 51 |
+
config_patches=options,
|
| 52 |
+
)()
|
| 53 |
+
lib_path = result[0] if isinstance(result, (list, tuple)) else result
|
| 54 |
+
return lib_path
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
def list_mode_options(mode: str = None, dynamic: bool = None) -> Dict[str, Any]:
|
| 58 |
+
r"""Returns a dictionary describing the optimizations that each of the available
|
| 59 |
+
modes passed to `torch.compile()` performs.
|
| 60 |
+
|
| 61 |
+
Args:
|
| 62 |
+
mode (str, optional): The mode to return the optimizations for.
|
| 63 |
+
If None, returns optimizations for all modes
|
| 64 |
+
dynamic (bool, optional): Whether dynamic shape is enabled.
|
| 65 |
+
|
| 66 |
+
Example::
|
| 67 |
+
>>> torch._inductor.list_mode_options()
|
| 68 |
+
"""
|
| 69 |
+
|
| 70 |
+
mode_options = {
|
| 71 |
+
"default": {},
|
| 72 |
+
# enable cudagraphs
|
| 73 |
+
"reduce-overhead": {
|
| 74 |
+
"triton.cudagraphs": True,
|
| 75 |
+
},
|
| 76 |
+
# enable max-autotune
|
| 77 |
+
"max-autotune-no-cudagraphs": {
|
| 78 |
+
"max_autotune": True,
|
| 79 |
+
},
|
| 80 |
+
# enable max-autotune
|
| 81 |
+
# enable cudagraphs
|
| 82 |
+
"max-autotune": {
|
| 83 |
+
"max_autotune": True,
|
| 84 |
+
"triton.cudagraphs": True,
|
| 85 |
+
},
|
| 86 |
+
}
|
| 87 |
+
return mode_options[mode] if mode else mode_options
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
def list_options() -> Dict[str, Any]:
|
| 91 |
+
r"""Returns a dictionary describing the optimizations and debug configurations
|
| 92 |
+
that are available to `torch.compile()`.
|
| 93 |
+
|
| 94 |
+
The options are documented in `torch._inductor.config`.
|
| 95 |
+
|
| 96 |
+
Example::
|
| 97 |
+
|
| 98 |
+
>>> torch._inductor.list_options()
|
| 99 |
+
"""
|
| 100 |
+
|
| 101 |
+
from torch._inductor import config
|
| 102 |
+
|
| 103 |
+
current_config: Dict[str, Any] = config.to_dict() # type: ignore[attr-defined]
|
| 104 |
+
|
| 105 |
+
return list(current_config.keys())
|
| 106 |
+
|
| 107 |
+
|
| 108 |
+
def cudagraph_mark_step_begin():
|
| 109 |
+
"Indicates that a new iteration of inference or training is about to begin."
|
| 110 |
+
from .cudagraph_trees import mark_step_begin
|
| 111 |
+
|
| 112 |
+
mark_step_begin()
|
llava_next/lib/python3.10/site-packages/torch/_inductor/bounds.py
ADDED
|
@@ -0,0 +1,101 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import operator
|
| 2 |
+
from functools import partial
|
| 3 |
+
from typing import Dict, Optional
|
| 4 |
+
|
| 5 |
+
import torch
|
| 6 |
+
from torch.fx.experimental.symbolic_shapes import free_symbols
|
| 7 |
+
from torch.utils._sympy.value_ranges import bound_sympy, ValueRangeAnalysis, ValueRanges
|
| 8 |
+
from .ir import InterpreterShim, LoopBody
|
| 9 |
+
from .utils import cache_on_self, dominated_nodes
|
| 10 |
+
from .virtualized import V
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class BoundVars:
|
| 14 |
+
"""
|
| 15 |
+
Performs Value Range Analysis on LoopBody's fx graph by calling BoundVars.run()
|
| 16 |
+
It exposes the ranges of the nodes in the `bounds` variable
|
| 17 |
+
|
| 18 |
+
Note. A current limitation of this analysis is that it just works on a per-loop basis.
|
| 19 |
+
We should be able to propagate the bounds between across the whole graph. This may benefit
|
| 20 |
+
the case a bounded variable is returned by a kernel and fed into another.
|
| 21 |
+
"""
|
| 22 |
+
|
| 23 |
+
def __init__(self, loop_body: LoopBody):
|
| 24 |
+
self.loop_body = loop_body
|
| 25 |
+
self.replacement_vals = {
|
| 26 |
+
k: ValueRanges(0, v - 1) if not free_symbols(v) else bound_sympy(v)
|
| 27 |
+
for k, v in loop_body.var_ranges.items()
|
| 28 |
+
}
|
| 29 |
+
# avoid computing these values, pessimistically assume that they are unbounded
|
| 30 |
+
self.unbounded_vars = dominated_nodes(
|
| 31 |
+
node
|
| 32 |
+
for node in self.loop_body.get_nodes()
|
| 33 |
+
if node.target in ["load", "reduction", operator.getitem]
|
| 34 |
+
or "masked_subblock" in node.target
|
| 35 |
+
)
|
| 36 |
+
# To access this variable call `get_bounds()`
|
| 37 |
+
self._bounds: Optional[Dict[torch.fx.Node, ValueRanges]] = {}
|
| 38 |
+
|
| 39 |
+
@cache_on_self
|
| 40 |
+
def get_bounds(self):
|
| 41 |
+
submodules = self.swap_submodules(self.loop_body.submodules)
|
| 42 |
+
|
| 43 |
+
# Initialize the environment with the unbounded variables
|
| 44 |
+
for node in self.unbounded_vars:
|
| 45 |
+
# we need to evaluate masked_subblock to recurse, and we need to set indirect values
|
| 46 |
+
if not isinstance(node.target, str) or (
|
| 47 |
+
"masked_subblock" not in node.target
|
| 48 |
+
and "set_indirect" not in node.target
|
| 49 |
+
):
|
| 50 |
+
self._bounds[node] = ValueRanges.unknown()
|
| 51 |
+
|
| 52 |
+
with V.set_ops_handler(ValueRangeAnalysis()):
|
| 53 |
+
interpreter = InterpreterShim(self.loop_body.root_block.graph, submodules)
|
| 54 |
+
interpreter.run(V.get_ops_handler(), initial_env=self._bounds)
|
| 55 |
+
return self._bounds
|
| 56 |
+
|
| 57 |
+
def swap_submodules(self, submodules):
|
| 58 |
+
result = {}
|
| 59 |
+
for key in submodules.keys():
|
| 60 |
+
if key == "get_index":
|
| 61 |
+
result[key] = self.get_index
|
| 62 |
+
elif "masked_subblock" in key:
|
| 63 |
+
subblock = self.loop_body.subblocks[key]
|
| 64 |
+
# The result within the lambda will reference to the final
|
| 65 |
+
# set of modules at the end of the for-loop as it stores a reference to it
|
| 66 |
+
result[key] = lambda mask, value: self.masked_subblock(
|
| 67 |
+
subblock, self._bounds, mask, value, result
|
| 68 |
+
)
|
| 69 |
+
else:
|
| 70 |
+
assert "set_indirect" in key
|
| 71 |
+
idx = int(key[len("set_indirect") :])
|
| 72 |
+
var = self.loop_body.indirect_vars[idx]
|
| 73 |
+
indirect = partial(self.set_indirect, var)
|
| 74 |
+
result[key] = indirect
|
| 75 |
+
|
| 76 |
+
return result
|
| 77 |
+
|
| 78 |
+
def masked_subblock(self, subblock, env, mask, value, submodules):
|
| 79 |
+
interp = InterpreterShim(subblock.graph, submodules)
|
| 80 |
+
interp.run(V.get_ops_handler(), initial_env=env)
|
| 81 |
+
output = [node for node in subblock.graph.nodes if node.target == "output"]
|
| 82 |
+
assert len(output) == 1
|
| 83 |
+
# dont bother unioning with value since the load from buffer will be
|
| 84 |
+
# pessimistically assumed to be inf anyway
|
| 85 |
+
return interp.env[output[0]]
|
| 86 |
+
|
| 87 |
+
def set_indirect(self, old, new):
|
| 88 |
+
assert isinstance(new, ValueRanges)
|
| 89 |
+
self.replacement_vals[old] = new
|
| 90 |
+
return new
|
| 91 |
+
|
| 92 |
+
def get_index(self, name):
|
| 93 |
+
expr = self.loop_body.indexing_exprs[name]
|
| 94 |
+
bound = self.replacement_vals.get(expr)
|
| 95 |
+
if bound is None:
|
| 96 |
+
bound = bound_sympy(expr, self.replacement_vals)
|
| 97 |
+
# The following assertion is true at the time of this writing
|
| 98 |
+
# We don't assert is as to not execute bound_sympy when bound is not None
|
| 99 |
+
# assert bound is None or bound == bound_sympy(expr, self.replacement_vals)
|
| 100 |
+
self.replacement_vals[name] = bound
|
| 101 |
+
return bound
|
llava_next/lib/python3.10/site-packages/torch/_inductor/codecache.py
ADDED
|
@@ -0,0 +1,1424 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import base64
|
| 2 |
+
import dataclasses
|
| 3 |
+
import functools
|
| 4 |
+
import getpass
|
| 5 |
+
import hashlib
|
| 6 |
+
import importlib
|
| 7 |
+
import json
|
| 8 |
+
import logging
|
| 9 |
+
import multiprocessing
|
| 10 |
+
import os
|
| 11 |
+
import pathlib
|
| 12 |
+
import platform
|
| 13 |
+
import re
|
| 14 |
+
import shlex
|
| 15 |
+
import shutil
|
| 16 |
+
import signal
|
| 17 |
+
import subprocess
|
| 18 |
+
import sys
|
| 19 |
+
import sysconfig
|
| 20 |
+
import tempfile
|
| 21 |
+
import threading
|
| 22 |
+
import types
|
| 23 |
+
import warnings
|
| 24 |
+
import weakref
|
| 25 |
+
from bisect import bisect_right
|
| 26 |
+
from concurrent.futures import Future, ProcessPoolExecutor, ThreadPoolExecutor
|
| 27 |
+
from ctypes import cdll
|
| 28 |
+
from dataclasses import field
|
| 29 |
+
from functools import partial
|
| 30 |
+
from importlib import abc
|
| 31 |
+
from pathlib import Path
|
| 32 |
+
from threading import Thread
|
| 33 |
+
from time import sleep, time
|
| 34 |
+
from typing import Any, Callable, Dict, List, Set, Union
|
| 35 |
+
|
| 36 |
+
import torch
|
| 37 |
+
|
| 38 |
+
from torch._inductor import config, cuda_properties, exc
|
| 39 |
+
from torch._inductor.utils import developer_warning
|
| 40 |
+
from torch.hub import _Faketqdm, tqdm
|
| 41 |
+
|
| 42 |
+
_HERE = os.path.abspath(__file__)
|
| 43 |
+
_TORCH_PATH = os.path.dirname(os.path.dirname(_HERE))
|
| 44 |
+
|
| 45 |
+
if config.is_fbcode():
|
| 46 |
+
from triton.fb import build_paths
|
| 47 |
+
from triton.fb.build import _run_build_command
|
| 48 |
+
|
| 49 |
+
from torch._inductor.fb.utils import (
|
| 50 |
+
log_global_cache_stats,
|
| 51 |
+
log_global_cache_vals,
|
| 52 |
+
use_global_cache,
|
| 53 |
+
)
|
| 54 |
+
else:
|
| 55 |
+
|
| 56 |
+
def log_global_cache_stats(*args, **kwargs):
|
| 57 |
+
pass
|
| 58 |
+
|
| 59 |
+
def log_global_cache_vals(*args, **kwargs):
|
| 60 |
+
pass
|
| 61 |
+
|
| 62 |
+
def use_global_cache():
|
| 63 |
+
return False
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
LOCK_TIMEOUT = 600
|
| 67 |
+
|
| 68 |
+
# timing metrics for time spent in the compilation
|
| 69 |
+
_cumulative_compile_time = 0
|
| 70 |
+
_t0 = None
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
def _compile_start():
|
| 74 |
+
global _t0
|
| 75 |
+
if _t0 is None:
|
| 76 |
+
_t0 = time()
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
def _compile_end():
|
| 80 |
+
global _cumulative_compile_time, _t0
|
| 81 |
+
if _t0 is not None:
|
| 82 |
+
t1 = time()
|
| 83 |
+
_cumulative_compile_time += t1 - _t0
|
| 84 |
+
_t0 = None
|
| 85 |
+
# print("CUMULATIVE COMPILE TIME", _cumulative_compile_time)
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
log = logging.getLogger(__name__)
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
@functools.lru_cache(None)
|
| 92 |
+
def cache_dir():
|
| 93 |
+
cache_dir = os.environ.get("TORCHINDUCTOR_CACHE_DIR")
|
| 94 |
+
if cache_dir is None:
|
| 95 |
+
cache_dir = f"{tempfile.gettempdir()}/torchinductor_{getpass.getuser()}"
|
| 96 |
+
os.makedirs(cache_dir, exist_ok=True)
|
| 97 |
+
return cache_dir
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
def cpp_wrapper_cache_dir(name):
|
| 101 |
+
cu_str = (
|
| 102 |
+
"cpu"
|
| 103 |
+
if torch.version.cuda is None
|
| 104 |
+
else f'cu{torch.version.cuda.replace(".", "")}'
|
| 105 |
+
)
|
| 106 |
+
python_version = f"py{sys.version_info.major}{sys.version_info.minor}"
|
| 107 |
+
build_folder = f"{python_version}_{cu_str}"
|
| 108 |
+
|
| 109 |
+
cpp_wrapper_dir = os.path.join(cache_dir(), build_folder)
|
| 110 |
+
cpp_wrapper_build_directory = os.path.join(cpp_wrapper_dir, name)
|
| 111 |
+
os.makedirs(cpp_wrapper_build_directory, exist_ok=True)
|
| 112 |
+
return cpp_wrapper_build_directory
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
class CacheBase:
|
| 116 |
+
@staticmethod
|
| 117 |
+
@functools.lru_cache(None)
|
| 118 |
+
def get_system():
|
| 119 |
+
try:
|
| 120 |
+
import triton
|
| 121 |
+
|
| 122 |
+
triton_version = triton.__version__
|
| 123 |
+
except ModuleNotFoundError:
|
| 124 |
+
triton_version = None
|
| 125 |
+
|
| 126 |
+
system = {
|
| 127 |
+
"device": {
|
| 128 |
+
"name": torch.cuda.get_device_properties(
|
| 129 |
+
torch.cuda.current_device()
|
| 130 |
+
).name,
|
| 131 |
+
},
|
| 132 |
+
"version": {
|
| 133 |
+
"cuda": torch.version.cuda,
|
| 134 |
+
"triton": triton_version,
|
| 135 |
+
},
|
| 136 |
+
"other": {
|
| 137 |
+
"allow_tf32": torch.backends.cuda.matmul.allow_tf32,
|
| 138 |
+
},
|
| 139 |
+
}
|
| 140 |
+
|
| 141 |
+
system["hash"] = hashlib.sha256(
|
| 142 |
+
json.dumps(system, sort_keys=True).encode("utf-8")
|
| 143 |
+
).hexdigest()
|
| 144 |
+
|
| 145 |
+
return system
|
| 146 |
+
|
| 147 |
+
@staticmethod
|
| 148 |
+
@functools.lru_cache(None)
|
| 149 |
+
def get_local_cache_path():
|
| 150 |
+
return Path(os.path.join(cache_dir(), "cache", CacheBase.get_system()["hash"]))
|
| 151 |
+
|
| 152 |
+
@staticmethod
|
| 153 |
+
@functools.lru_cache(None)
|
| 154 |
+
def get_global_cache_path():
|
| 155 |
+
return (
|
| 156 |
+
Path(os.path.join(config.global_cache_dir, CacheBase.get_system()["hash"]))
|
| 157 |
+
if config.global_cache_dir is not None
|
| 158 |
+
else None
|
| 159 |
+
)
|
| 160 |
+
|
| 161 |
+
def __init__(self):
|
| 162 |
+
if not torch.cuda.is_available():
|
| 163 |
+
return
|
| 164 |
+
|
| 165 |
+
self.system = CacheBase.get_system()
|
| 166 |
+
|
| 167 |
+
self.local_cache_path = CacheBase.get_local_cache_path()
|
| 168 |
+
self.global_cache_path = CacheBase.get_global_cache_path()
|
| 169 |
+
|
| 170 |
+
def get_local_cache(self):
|
| 171 |
+
if not self.local_cache_path.is_file():
|
| 172 |
+
return {}
|
| 173 |
+
with open(self.local_cache_path) as local_cache_fp:
|
| 174 |
+
local_cache = json.load(local_cache_fp)
|
| 175 |
+
return local_cache["cache"]
|
| 176 |
+
|
| 177 |
+
def update_local_cache(self, local_cache):
|
| 178 |
+
if not os.path.exists(self.local_cache_path.parent):
|
| 179 |
+
os.makedirs(self.local_cache_path.parent, exist_ok=True)
|
| 180 |
+
write_atomic(
|
| 181 |
+
self.local_cache_path,
|
| 182 |
+
json.dumps({"system": self.system, "cache": local_cache}, indent=4),
|
| 183 |
+
)
|
| 184 |
+
|
| 185 |
+
|
| 186 |
+
class LocalCache(CacheBase):
|
| 187 |
+
def lookup(self, *keys: List[str]):
|
| 188 |
+
cache = self.get_local_cache()
|
| 189 |
+
|
| 190 |
+
sub_cache = cache
|
| 191 |
+
for key in keys:
|
| 192 |
+
if key in cache:
|
| 193 |
+
sub_cache = cache[key]
|
| 194 |
+
else:
|
| 195 |
+
return None
|
| 196 |
+
|
| 197 |
+
return sub_cache
|
| 198 |
+
|
| 199 |
+
def set_value(self, *keys: List[str], value: Any):
|
| 200 |
+
cache = self.get_local_cache()
|
| 201 |
+
|
| 202 |
+
sub_cache = cache
|
| 203 |
+
for key in keys[0:-1]:
|
| 204 |
+
sub_cache.setdefault(key, {})
|
| 205 |
+
sub_cache = sub_cache[key]
|
| 206 |
+
sub_cache[keys[-1]] = value
|
| 207 |
+
|
| 208 |
+
self.update_local_cache(cache)
|
| 209 |
+
|
| 210 |
+
|
| 211 |
+
class PersistentCache(CacheBase):
    """Autotuning benchmark cache backed by a local file plus a read-only global file."""

    # NOTE(review): lru_cache on an instance method keys on `self` and keeps the
    # instance alive for the process lifetime; presumably acceptable here since
    # the cache object is effectively a singleton — confirm.
    @functools.lru_cache(None)
    def get_global_cache(self):
        # Returns the "cache" payload of the global cache file, or {} when the
        # path is unset or the file is missing.
        if self.global_cache_path is None or not self.global_cache_path.is_file():
            return {}
        with open(self.global_cache_path) as global_cache_fp:
            global_cache = json.load(global_cache_fp)
        return global_cache["cache"]

    def lookup(
        self,
        choices,
        name: str,
        inputs: str,
        benchmark: Callable[[Any], float],
    ):
        """
        Check to see if we have benchmarked the given choice callers. For each
        choice caller:

        1. Check global_cache[name][inputs][choice], return benchmark if cached.
        2. Check local_cache[name][inputs][choice], return benchmark if cached.
        3.
           a. `max_autotune_gemm=True`: benchmark the choice, update
              local_cache[name][inputs][choice], and return the benchmark.
           b. `max_autotune_gemm=False`: don't benchmark the choice, return nothing.

        Returns a dict mapping each choice to its timing; may be partial or
        empty when autotuning is disabled and the caches miss.
        """

        log_stats = partial(log_global_cache_stats, self.system, name, inputs)
        log_vals = partial(log_global_cache_vals, self.system, name, inputs)
        timings = {}

        def check_cache(cache, callback=None):
            """Check if `cache` contains data for all the choices"""
            hit = True
            for choice in choices:
                choice_hash = choice.hash_key()
                if choice_hash in cache.get(name, {}).get(inputs, {}):
                    # cache hit
                    timings[choice] = cache[name][inputs][choice_hash]
                else:
                    # cache miss
                    hit = False
                    break
            if callback:
                callback(cached=hit)
            return hit

        if config.max_autotune or config.max_autotune_gemm:
            local_cache = self.get_local_cache()
            # check local cache first since it is data specific to the current machine
            if not check_cache(local_cache) and not (
                use_global_cache()
                and check_cache(self.get_global_cache(), callback=log_stats)
            ):
                # re-benchmark everything to try to get consistent numbers from the same machine
                for choice in choices:
                    timings[choice] = benchmark(choice)
                    local_cache.setdefault(name, {})
                    local_cache[name].setdefault(inputs, {})
                    local_cache[name][inputs][choice.hash_key()] = timings[choice]

                self.update_local_cache(local_cache)

                if use_global_cache():
                    timings_to_log = {
                        choice.hash_key(): timings[choice] for choice in choices
                    }
                    log_vals(timings_to_log)
        elif use_global_cache():
            # only check global cache, not local one
            check_cache(self.get_global_cache(), callback=log_stats)
            # may have a partial cache hit, where not everything is benchmarked

        return timings
|
| 286 |
+
|
| 287 |
+
|
| 288 |
+
def get_lock_dir():
    """Return (creating on demand) the directory used for inter-process file locks."""
    path = os.path.join(cache_dir(), "locks")
    os.makedirs(path, exist_ok=True)
    return path
|
| 293 |
+
|
| 294 |
+
|
| 295 |
+
def code_hash(code, extra: str = ""):
    """Return a stable cache key for *code*: "c" + truncated base32 SHA-256.

    *extra* (e.g. the compile command) is folded into the hash so the same
    source compiled differently gets a different key.
    """
    payload = code if extra == "" else f"{code}||{extra}"
    digest = hashlib.sha256(payload.encode("utf-8")).digest()
    return "c" + base64.b32encode(digest)[:51].decode("utf-8").lower()
|
| 305 |
+
|
| 306 |
+
|
| 307 |
+
def get_path(basename: str, extension: str, specified_dir: str = ""):
    """Map a cache key to (basename, subdir, full path) under the cache dir."""
    if not specified_dir:
        # shard by the 2nd/3rd characters of the key to keep directories small
        subdir = os.path.join(cache_dir(), basename[1:3])
    elif os.path.isabs(specified_dir):
        subdir = specified_dir
    else:
        subdir = os.path.join(cache_dir(), specified_dir)
    return basename, subdir, os.path.join(subdir, f"{basename}.{extension}")
|
| 317 |
+
|
| 318 |
+
|
| 319 |
+
def get_hash(content: Union[str, bytes], extra: str = "", hash_type: str = "code"):
    """Dispatch to the right hasher: "code" hashes text+extra, "cubin" hashes repr(bytes)."""
    assert hash_type in ["code", "cubin"], "Hash type not supported"
    if hash_type == "cubin":
        return code_hash(repr(content))
    return code_hash(content, extra)
|
| 325 |
+
|
| 326 |
+
|
| 327 |
+
def write(
    content: Union[str, bytes],
    extension: str,
    extra: str = "",
    hash_type: str = "code",
    specified_dir: str = "",
):
    """Persist *content* into the on-disk cache keyed by its hash.

    Returns (basename, path); the file is only written when it does not
    already exist (content-addressed, so an existing file is identical).
    """
    key: str = get_hash(content, extra, hash_type)
    basename, subdir, path = get_path(key, extension, specified_dir)
    os.makedirs(subdir, exist_ok=True)
    if not os.path.exists(path):
        write_atomic(path, content)
    return basename, path
|
| 341 |
+
|
| 342 |
+
|
| 343 |
+
def write_atomic(path: str, content: Union[str, bytes]):
    """Atomically write *content* to *path* via a temp file + rename.

    A named temporary file is deliberately avoided (those have restricted
    permissions); the pid/thread-id suffix keeps concurrent writers apart.
    """
    assert isinstance(
        content, (str, bytes)
    ), "Only strings and byte arrays can be saved in the cache"
    target = pathlib.Path(path)
    tmp = target.parent / f".{os.getpid()}.{threading.get_ident()}.tmp"
    mode = "wb" if isinstance(content, bytes) else "w"
    with tmp.open(mode) as fh:
        fh.write(content)
    tmp.rename(target)
|
| 355 |
+
|
| 356 |
+
|
| 357 |
+
@dataclasses.dataclass
class CompiledFxGraph:
    """Class holding a compiled FX graph and the metadata needed to reload it."""

    # Callable loaded from the on-disk artifact (filled lazily by _run_from_cache).
    compiled_artifact: Callable = None
    # Directly installed callable; when None we fall back to the disk artifact.
    current_callable: Callable = None
    cache_key: str = None
    artifact_path: str = None
    cache_linemap: List = None
    device_types: Set[str] = field(default_factory=set)
    device_idxs: Set[int] = field(default_factory=set)
    mutated_inputs: Set[str] = field(default_factory=set)
    # Fixed: the annotation says Set[int] but the default factory was `list`,
    # unlike every sibling field above; use `set` so the default matches the type.
    mutated_input_idxs: Set[int] = field(default_factory=set)

    _boxed_call: bool = None

    def __call__(self, inputs) -> Any:
        return self.get_current_callable()(inputs)

    def get_current_callable(self):
        if self.current_callable is None:
            # This prevents a circular reference that makes CompiledFxGraph
            # get stuck without getting garbage collected
            return functools.partial(_run_from_cache, weakref.proxy(self))
        else:
            return self.current_callable
|
| 383 |
+
|
| 384 |
+
|
| 385 |
+
def _run_from_cache(compiled_graph: CompiledFxGraph, inputs):
    """Lazily reload a compiled graph's artifact from disk and run it on *inputs*.

    We can't really serialize callables that may be C++/Triton/etc., so only
    their disk cache location is serialized; the artifact is re-loaded here on
    first use.
    TODO: an API that saves compiled models e2e to disk will need a better scheme.
    """
    if compiled_graph.compiled_artifact is None:
        from .codecache import PyCodeCache

        linemap = compiled_graph.cache_linemap
        compiled_graph.compiled_artifact = PyCodeCache.load_by_key_path(
            compiled_graph.cache_key,
            compiled_graph.artifact_path,
            linemap if linemap is not None else (),
        ).call
    return compiled_graph.compiled_artifact(inputs)
|
| 402 |
+
|
| 403 |
+
|
| 404 |
+
def cpp_compiler():
    """Pick the C++ compiler: fbcode's pinned gcc, else search config.cpp.cxx."""
    if config.is_fbcode():
        return build_paths.gcc()
    cxx = config.cpp.cxx
    candidates = tuple(cxx) if isinstance(cxx, (list, tuple)) else (cxx,)
    return cpp_compiler_search(candidates)
|
| 412 |
+
|
| 413 |
+
|
| 414 |
+
@functools.lru_cache(1)
def cpp_compiler_search(search):
    """Return the first working compiler from *search* (verified via --version).

    A None entry means "install g++ via conda" (Linux only, opt-in through the
    TORCH_INDUCTOR_INSTALL_GXX env var). Raises InvalidCxxCompiler when nothing
    in *search* works.
    """
    for cxx in search:
        try:
            if cxx is None:
                # gxx package is only available for Linux
                # according to https://anaconda.org/conda-forge/gxx/
                if sys.platform != "linux":
                    continue
                # Do not install GXX by default
                if not os.getenv("TORCH_INDUCTOR_INSTALL_GXX"):
                    continue
                from filelock import FileLock

                # Serialize the conda install across processes sharing the cache.
                lock_dir = get_lock_dir()
                lock = FileLock(
                    os.path.join(lock_dir, "g++.lock"), timeout=LOCK_TIMEOUT
                )
                with lock:
                    cxx = install_gcc_via_conda()
            # Probe the candidate; any failure moves on to the next entry.
            subprocess.check_output([cxx, "--version"])
            return cxx
        except (subprocess.SubprocessError, FileNotFoundError, ImportError):
            continue
    raise exc.InvalidCxxCompiler()
|
| 439 |
+
|
| 440 |
+
|
| 441 |
+
def install_gcc_via_conda():
    """On older systems, this is a quick way to get a modern compiler.

    Returns the expected g++ path inside the conda prefix; when conda cannot be
    located the path is returned anyway and the caller's --version probe fails.
    """
    prefix = os.path.join(cache_dir(), "gcc")
    cxx_path = os.path.join(prefix, "bin", "g++")
    if not os.path.exists(cxx_path):
        log.info("Downloading GCC via conda")
        # Fixed: the previous `os.environ.get("CONDA_EXE", "conda")` could never
        # be None, so the `shutil.which` fallback below was dead code and a
        # missing conda binary crashed with FileNotFoundError instead of being
        # skipped gracefully.
        conda = os.environ.get("CONDA_EXE")
        if conda is None:
            conda = shutil.which("conda")
        if conda is not None:
            subprocess.check_call(
                [
                    conda,
                    "create",
                    f"--prefix={prefix}",
                    "--channel=conda-forge",
                    "--quiet",
                    "-y",
                    "python=3.8",
                    "gxx",
                ],
                stdout=subprocess.PIPE,
            )
    return cxx_path
|
| 465 |
+
|
| 466 |
+
|
| 467 |
+
def is_gcc():
    """Return True when the configured C++ compiler path looks like gcc/g++.

    Wraps the previous raw `re.Match | None` return in bool() — truthiness is
    unchanged for existing callers, but the value is now a proper boolean.
    """
    return bool(re.search(r"(gcc|g\+\+)", cpp_compiler()))
|
| 469 |
+
|
| 470 |
+
|
| 471 |
+
@functools.lru_cache(None)
def is_apple_clang():
    """Return True when the configured C++ compiler is Apple clang."""
    version_output = subprocess.check_output([cpp_compiler(), "--version"])
    first_line = version_output.decode("utf8").splitlines()[0]
    return "Apple" in first_line
|
| 476 |
+
|
| 477 |
+
|
| 478 |
+
class VecISA:
    """Describes one CPU vector ISA (bit width, macro, flags, lanes per dtype).

    Truthiness (`bool(isa)`) means "this ISA is usable here": it dry-compiles a
    tiny AVX kernel and loads it in a subprocess to verify both the hardware
    and the PyTorch build support it.
    """

    _bit_width: int
    _macro: str
    _arch_flags: str
    _dtype_nelements: Dict[torch.dtype, int]

    # Note [Checking for Vectorized Support in Inductor]
    # TorchInductor CPU vectorization reuses PyTorch vectorization utility functions
    # Hence, TorchInductor would depend on Sleef* to accelerate mathematical functions
    # like exp, pow, sin, cos and etc.
    # But PyTorch and TorchInductor might use different compilers to build code. If
    # PyTorch uses gcc-7/g++-7 to build the release package, the libtorch_cpu.so
    # will not expose the Sleef* AVX512 symbols since gcc-7/g++-7 cannot pass
    # avx512 check in CMake - FindAVX.cmake. But TorchInductor install the latest
    # gcc/g++ compiler by default while it could support the AVX512 compilation.
    # Therefore, there would be a conflict sleef version between PyTorch and
    # TorchInductor. Hence, we dry-compile the following code to check whether current
    # HW platform and PyTorch both could support AVX512 or AVX2. And suppose ARM
    # also needs the logic
    # In fbcode however, we are using the same compiler for pytorch and for inductor codegen,
    # making the runtime check unnecessary.
    _avx_code = """
#if defined(CPU_CAPABILITY_AVX512) || defined(CPU_CAPABILITY_AVX2)
#include <ATen/cpu/vec/functional.h>
#include <ATen/cpu/vec/vec.h>
#endif

__attribute__((aligned(64))) float in_out_ptr0[16] = {0.0};

extern "C" void __avx_chk_kernel() {
    auto tmp0 = at::vec::Vectorized<float>(1);
    auto tmp1 = tmp0.exp();
    tmp1.store(in_out_ptr0);
}
"""

    _avx_py_load = """
import torch
from ctypes import cdll
cdll.LoadLibrary("__lib_path__")
"""

    def bit_width(self):
        # Vector register width in bits (e.g. 256 for AVX2, 512 for AVX512).
        return self._bit_width

    def nelements(self, dtype: torch.dtype = torch.float):
        # Number of lanes of *dtype* that fit in one vector register.
        return self._dtype_nelements[dtype]

    def build_macro(self):
        # Preprocessor macro selecting this ISA in the generated C++ code.
        return self._macro

    def build_arch_flags(self):
        # Compiler flags enabling this ISA's instructions.
        return self._arch_flags

    def __hash__(self) -> int:
        return hash(str(self))

    # NOTE(review): lru_cache on __bool__ keys on `self`, so the (expensive)
    # dry-compile runs at most once per ISA instance per process.
    @functools.lru_cache(None)
    def __bool__(self):
        if config.cpp.vec_isa_ok is not None:
            # Explicit override from config skips the runtime probe entirely.
            return config.cpp.vec_isa_ok

        key, input_path = write(VecISA._avx_code, "cpp")
        from filelock import FileLock

        lock_dir = get_lock_dir()
        lock = FileLock(os.path.join(lock_dir, key + ".lock"), timeout=LOCK_TIMEOUT)
        with lock:
            output_path = input_path[:-3] + "so"
            build_cmd = shlex.split(
                cpp_compile_command(
                    input_path, output_path, warning_all=False, vec_isa=self
                )
            )
            try:
                # Check build result
                compile_file(input_path, output_path, build_cmd)
                # Load the .so in a fresh subprocess so an ABI/symbol failure
                # cannot crash this process.
                subprocess.check_call(
                    [
                        sys.executable,
                        "-c",
                        VecISA._avx_py_load.replace("__lib_path__", output_path),
                    ],
                    stderr=subprocess.DEVNULL,
                    env={**os.environ, "PYTHONPATH": ":".join(sys.path)},
                )
            except Exception as e:
                # Any compile/load failure means the ISA is unusable here.
                return False

            return True
|
| 568 |
+
|
| 569 |
+
|
| 570 |
+
@dataclasses.dataclass
class VecAVX512(VecISA):
    # 512-bit vectors: 16 fp32 lanes, 32 bf16/fp16 lanes.
    _bit_width = 512
    _macro = "CPU_CAPABILITY_AVX512"
    _arch_flags = "-mavx512f -mavx512dq -mavx512vl -mavx512bw -mfma"
    _dtype_nelements = {torch.float: 16, torch.bfloat16: 32, torch.float16: 32}

    def __str__(self) -> str:
        return "avx512"

    # dataclass defines __eq__ and would set __hash__ to None; restore the base hash.
    __hash__: Callable[[VecISA], Any] = VecISA.__hash__
|
| 581 |
+
|
| 582 |
+
|
| 583 |
+
@dataclasses.dataclass
class VecAVX2(VecISA):
    # 256-bit vectors: 8 fp32 lanes, 16 bf16/fp16 lanes.
    _bit_width = 256
    _macro = "CPU_CAPABILITY_AVX2"
    _arch_flags = "-mavx2 -mfma"
    _dtype_nelements = {torch.float: 8, torch.bfloat16: 16, torch.float16: 16}

    def __str__(self) -> str:
        return "avx2"

    # dataclass defines __eq__ and would set __hash__ to None; restore the base hash.
    __hash__: Callable[[VecISA], Any] = VecISA.__hash__
|
| 594 |
+
|
| 595 |
+
|
| 596 |
+
class InvalidVecISA(VecISA):
    # Sentinel "no vectorization available" ISA; always falsy.
    _bit_width = 0
    _macro = ""
    _arch_flags = ""
    _dtype_nelements = {}

    def __str__(self) -> str:
        return "INVALID_VEC_ISA"

    def __bool__(self):
        # Never usable, and skips the dry-compile probe of the base class.
        return False

    __hash__: Callable[[VecISA], Any] = VecISA.__hash__
|
| 609 |
+
|
| 610 |
+
|
| 611 |
+
# Shared sentinel instance and the ISA candidates, ordered widest-first so
# pick_vec_isa() can default to the first usable entry.
invalid_vec_isa = InvalidVecISA()
supported_vec_isa_list = [VecAVX512(), VecAVX2()]
|
| 613 |
+
|
| 614 |
+
|
| 615 |
+
# Cache the result to avoid re-reading cpuinfo: the full cpuinfo content is
# mostly irrelevant for the ISA check, so only the derived ISA list is kept.
@functools.lru_cache(None)
def valid_vec_isa_list():
    """Return the VecISA instances supported by both the CPU and this build."""
    if sys.platform != "linux":
        return []
    with open("/proc/cpuinfo") as fp:
        cpuinfo = fp.read()
    # `and isa` triggers VecISA.__bool__, which dry-compiles a probe kernel.
    return [isa for isa in supported_vec_isa_list if str(isa) in cpuinfo and isa]
|
| 630 |
+
|
| 631 |
+
|
| 632 |
+
def pick_vec_isa():
    """Pick the vector ISA to compile for, honoring config.cpp.simdlen.

    With simdlen unset the widest usable ISA wins; with simdlen set, only an
    exact bit-width match is accepted; otherwise vectorization is disabled.
    """
    usable: List[VecISA] = valid_vec_isa_list()
    if not usable:
        return invalid_vec_isa

    # If the simdlen is None, determine the vectorization length automatically:
    # the list is ordered widest-first, so take the first entry.
    if config.cpp.simdlen is None:
        return usable[0]

    for candidate in usable:
        if candidate.bit_width() == config.cpp.simdlen:
            return candidate

    return invalid_vec_isa
|
| 647 |
+
|
| 648 |
+
|
| 649 |
+
def get_shared(shared=True):
    """Return linker flags for a shared library, or "" for a plain binary."""
    if not shared:
        return ""
    return "-shared -fPIC"
|
| 651 |
+
|
| 652 |
+
|
| 653 |
+
def get_warning_all_flag(warning_all=True):
    """Return "-Wall" when all warnings are requested, else ""."""
    if not warning_all:
        return ""
    return "-Wall"
|
| 655 |
+
|
| 656 |
+
|
| 657 |
+
def cpp_flags():
    """Return the baseline C++ language/warning flags for generated kernels."""
    return " ".join(["-std=c++17", "-Wno-unused-variable"])
|
| 659 |
+
|
| 660 |
+
|
| 661 |
+
def cpp_wrapper_flags():
    """Return the define enabling the C++ wrapper codepath in generated code."""
    return "-DTORCH_INDUCTOR_CPP_WRAPPER"
|
| 663 |
+
|
| 664 |
+
|
| 665 |
+
def optimization_flags():
    """Return optimization / arch / openmp compiler flags for this platform."""
    flags = ["-O3", "-ffast-math", "-fno-finite-math-only"]
    if config.is_fbcode():
        # FIXME: passing `-fopenmp` adds libgomp.so to the generated shared
        # library's dependencies. This causes `ldopen` to fail in fbcode,
        # because libgomp does not exist in the default paths. We will fix it
        # later by exposing the lib path.
        return " ".join(flags)

    if sys.platform == "darwin":
        # Per https://mac.r-project.org/openmp/ right way to pass `openmp`
        # flags to MacOS is via `-Xclang`; also `-march=native` is an
        # unrecognized option on M1.
        flags.append("-Xclang")
    elif platform.machine() == "ppc64le":
        flags.append("-mcpu=native")
    else:
        flags.append("-march=native")

    # fbcode already returned above (internal cannot find libgomp.so)
    flags.append("-fopenmp")
    return " ".join(flags)
|
| 687 |
+
|
| 688 |
+
|
| 689 |
+
def use_custom_generated_macros():
    """Return the define telling c10 headers to use pre-generated macros."""
    return "-D C10_USING_CUSTOM_GENERATED_MACROS"
|
| 691 |
+
|
| 692 |
+
|
| 693 |
+
def use_fb_internal_macros():
    """Return fbcode-only compiler flags (openmp lib + glog defines), else ""."""
    if not config.is_fbcode():
        return ""
    openmp_lib = build_paths.openmp_lib()
    return f"-Wp,-fopenmp {openmp_lib} -D C10_USE_GLOG -D C10_USE_MINIMAL_GLOG"
|
| 699 |
+
|
| 700 |
+
|
| 701 |
+
def use_standard_sys_dir_headers():
    """Return "-nostdinc" in fbcode (bundled headers only), else ""."""
    return "-nostdinc" if config.is_fbcode() else ""
|
| 706 |
+
|
| 707 |
+
|
| 708 |
+
@functools.lru_cache(None)
def is_conda_llvm_openmp_installed():
    """Return True when `conda list llvm-openmp` reports at least one package."""
    try:
        command = "conda list llvm-openmp --json"
        output = subprocess.check_output(command.split()).decode("utf8")
        return len(json.loads(output)) > 0
    except (subprocess.SubprocessError, FileNotFoundError):
        # FileNotFoundError added: a missing `conda` binary raises it directly
        # (it is not a SubprocessError) and previously escaped this
        # best-effort probe instead of returning False.
        return False
|
| 716 |
+
|
| 717 |
+
|
| 718 |
+
@functools.lru_cache(None)
def homebrew_libomp():
    """Return (available, prefix) for Homebrew's libomp installation.

    Best-effort: any failure to locate brew/libomp yields (False, "").
    """
    try:
        # check if `brew` is installed
        subprocess.check_output(["which", "brew"])
        # get the location of `libomp` if it is installed
        # this is the location that `libomp` **would** be installed
        # see https://github.com/Homebrew/brew/issues/10261#issuecomment-756563567 for details
        libomp_path = (
            subprocess.check_output(["brew", "--prefix", "libomp"])
            .decode("utf8")
            .strip()
        )
        # check if `libomp` is installed
        omp_available = os.path.exists(libomp_path)
        return omp_available, libomp_path
    except (subprocess.SubprocessError, FileNotFoundError):
        # FileNotFoundError added: a missing `which`/`brew` binary raises it
        # directly (not SubprocessError) and previously crashed this
        # best-effort probe instead of returning (False, "").
        return False, ""
|
| 736 |
+
|
| 737 |
+
|
| 738 |
+
def get_include_and_linking_paths(
    include_pytorch=False, vec_isa: VecISA = invalid_vec_isa, cuda=False, aot_mode=False
):
    """Assemble compiler include/link arguments for generated C++ kernels.

    Returns (ipaths, lpaths, libs, macros) as already-joined flag strings
    ("-I...", "-L...", "-l...", "-D..." respectively).
    """
    if (
        config.is_fbcode()
        and "CUDA_HOME" not in os.environ
        and "CUDA_PATH" not in os.environ
    ):
        # cpp_extension consults CUDA_HOME; point it at the fbcode toolchain.
        os.environ["CUDA_HOME"] = os.path.dirname(build_paths.cuda())
    from torch.utils import cpp_extension

    if aot_mode and config.is_fbcode():
        # Hack. The AOT inductor libs reference CUDA, so let's just include it for now.
        cuda = True

    macros = ""
    if sys.platform == "linux" and (
        include_pytorch
        or vec_isa != invalid_vec_isa
        or cuda
        or config.cpp.enable_kernel_profile
    ):
        # Note - We include pytorch only on linux right now. There is more work
        # to do to enable OMP build on darwin where PyTorch is built with IOMP
        # and we need a way to link to what PyTorch links.
        ipaths = cpp_extension.include_paths(cuda) + [sysconfig.get_path("include")]
        lpaths = cpp_extension.library_paths(cuda) + [
            sysconfig.get_config_var("LIBDIR")
        ]
        libs = []
        # No need to manually specify libraries in fbcode.
        if not config.is_fbcode():
            libs += ["c10", "torch", "torch_cpu"]
            libs += ["gomp"]
            if not aot_mode:
                libs += ["torch_python"]
        else:
            # internal remote execution is able to find omp, but not gomp
            libs += ["omp"]
            if aot_mode:
                ipaths += [os.path.dirname(cpp_prefix_path())]
        macros = vec_isa.build_macro()
        if macros:
            if config.is_fbcode() and vec_isa != invalid_vec_isa:
                cap = str(vec_isa).upper()
                macros = " ".join(
                    [
                        vec_isa.build_arch_flags(),
                        f"-D CPU_CAPABILITY={cap}",
                        f"-D CPU_CAPABILITY_{cap}",
                        f"-D HAVE_{cap}_CPU_DEFINITION",
                    ]
                )
            else:
                macros = f"-D{macros}"
        if cuda:
            if config.is_fbcode():
                libs += ["cuda"]
            else:
                libs += ["c10_cuda", "cuda", "torch_cuda"]
    else:
        # Note - this is effectively a header only inclusion. Usage of some header files may result in
        # symbol not found, if those header files require a library.
        # For those cases, include the lpath and libs command as we do for pytorch above.
        # This approach allows us to only pay for what we use.
        ipaths = cpp_extension.include_paths(cuda) + [sysconfig.get_path("include")]
        lpaths = []
        if sys.platform == "darwin":
            # only Apple builtin compilers (Apple Clang++) require openmp
            omp_available = not is_apple_clang()

            # check the `OMP_PREFIX` environment first
            if os.getenv("OMP_PREFIX") is not None:
                header_path = os.path.join(os.getenv("OMP_PREFIX"), "include", "omp.h")
                valid_env = os.path.exists(header_path)
                if valid_env:
                    ipaths.append(os.path.join(os.getenv("OMP_PREFIX"), "include"))
                    lpaths.append(os.path.join(os.getenv("OMP_PREFIX"), "lib"))
                else:
                    warnings.warn("environment variable `OMP_PREFIX` is invalid.")
                omp_available = omp_available or valid_env

            libs = [] if omp_available else ["omp"]

            # prefer to use openmp from `conda install llvm-openmp`
            if not omp_available and os.getenv("CONDA_PREFIX") is not None:
                omp_available = is_conda_llvm_openmp_installed()
                if omp_available:
                    conda_lib_path = os.path.join(os.getenv("CONDA_PREFIX"), "lib")
                    ipaths.append(os.path.join(os.getenv("CONDA_PREFIX"), "include"))
                    lpaths.append(conda_lib_path)
                    # Prefer Intel OpenMP on x86 machine
                    if os.uname().machine == "x86_64" and os.path.exists(
                        os.path.join(conda_lib_path, "libiomp5.dylib")
                    ):
                        libs = ["iomp5"]

            # next, try to use openmp from `brew install libomp`
            if not omp_available:
                omp_available, libomp_path = homebrew_libomp()
                if omp_available:
                    ipaths.append(os.path.join(libomp_path, "include"))
                    lpaths.append(os.path.join(libomp_path, "lib"))

            # if openmp is still not available, we let the compiler to have a try,
            # and raise error together with instructions at compilation error later
        else:
            libs = ["omp"] if config.is_fbcode() else ["gomp"]

    # third party libs
    if config.is_fbcode():
        ipaths.append(build_paths.sleef())
        ipaths.append(build_paths.openmp())
        ipaths.append(build_paths.gcc_include())
        ipaths.append(build_paths.libgcc())
        ipaths.append(build_paths.libgcc_arch())
        ipaths.append(build_paths.libgcc_backward())
        ipaths.append(build_paths.glibc())
        ipaths.append(build_paths.linux_kernel())
        ipaths.append(build_paths.gcc_install_tools_include())
        # We also need to bundle includes with absolute paths into a remote directory
        # (later on, we copy the include paths from cpp_extensions into our remote dir)
        ipaths.append("include")

    ipaths = " ".join(["-I" + p for p in ipaths])
    lpaths = " ".join(["-L" + p for p in lpaths])
    libs = " ".join(["-l" + p for p in libs])
    return ipaths, lpaths, libs, macros
|
| 866 |
+
|
| 867 |
+
|
| 868 |
+
def cpp_compile_command(
    input,
    output,
    warning_all=True,
    shared=True,
    include_pytorch=False,
    vec_isa: VecISA = invalid_vec_isa,
    cuda=False,
    aot_mode=False,
):
    """Build the full compiler command line (one whitespace-normalized string)
    for compiling *input* into *output* with the given options."""
    ipaths, lpaths, libs, macros = get_include_and_linking_paths(
        include_pytorch, vec_isa, cuda, aot_mode
    )
    if config.is_fbcode():
        if aot_mode:
            inp_name = input
            out_name = output
        else:
            # We need to copy any absolute-path torch includes
            inp_name = os.path.basename(input)
            out_name = os.path.basename(output)
        linker_paths = [os.path.dirname(build_paths.ld()), build_paths.glibc_lib()]
        linker_paths = " ".join(["-B" + p for p in linker_paths])
    else:
        inp_name = input
        out_name = output
        linker_paths = ""  # let the compiler pick
    # Collapse the multi-line template into a single space-separated command.
    return re.sub(
        r"[ \n]+",
        " ",
        f"""
            {cpp_compiler()} {inp_name} {get_shared(shared)}
            {get_warning_all_flag(warning_all)} {cpp_flags()}
            {ipaths} {lpaths} {libs} {macros} {linker_paths}
            {optimization_flags()}
            {use_custom_generated_macros()}
            {use_fb_internal_macros()}
            {use_standard_sys_dir_headers()}
            -o {out_name}
        """,
    ).strip()
|
| 909 |
+
|
| 910 |
+
|
| 911 |
+
class CudaKernelParamCache:
    """Process-wide map from kernel key to launch params (plus on-disk cubin path)."""

    # Class-level, so it is shared by all users in this process.
    cache = dict()
    clear = staticmethod(cache.clear)

    @classmethod
    def set(cls, key, params, cubin):
        # Persist the cubin blob to the cache dir, record its path in params,
        # then register the params under *key*.
        _, path = write(
            cubin,
            "cubin",
            hash_type="cubin",
            specified_dir=config.aot_inductor_output_path,
        )
        params["cubin_path"] = path
        cls.cache[key] = params

    @classmethod
    def get(cls, key):
        # Returns the cached params for *key*, or None when absent.
        return cls.cache.get(key, None)
|
| 929 |
+
|
| 930 |
+
|
| 931 |
+
class AotCodeCache:
    """Compiles AOT inductor C++ source into a .so, memoized by content hash."""

    # key -> compiled .so path; class-level, shared process-wide.
    cache = dict()
    clear = staticmethod(cache.clear)

    @classmethod
    def compile(cls, graph, source_code, cuda):
        # TODO: update cpp_compile_command for different platforms
        picked_vec_isa = invalid_vec_isa if cuda else pick_vec_isa()
        # The compile command participates in the cache key so a changed
        # toolchain/flags produces a different artifact.
        cpp_command = repr(
            cpp_compile_command(
                "i", "o", vec_isa=picked_vec_isa, cuda=cuda, aot_mode=graph.aot_mode
            )
        )
        key, input_path = write(
            source_code,
            "cpp",
            extra=cpp_command,
            specified_dir=config.aot_inductor_output_path,
        )
        if key not in cls.cache:
            from filelock import FileLock

            # Serialize builds of the same key across processes.
            lock_dir = get_lock_dir()
            lock = FileLock(os.path.join(lock_dir, key + ".lock"), timeout=LOCK_TIMEOUT)
            with lock:
                output_so = os.path.splitext(input_path)[0] + ".so"

                if not os.path.exists(output_so):
                    cmd = shlex.split(
                        cpp_compile_command(
                            input=input_path,
                            output=output_so,
                            vec_isa=picked_vec_isa,
                            cuda=cuda,
                            aot_mode=graph.aot_mode,
                        )
                    )
                    log.debug("aot compilation command: %s", " ".join(cmd))
                    try:
                        subprocess.check_call(cmd)
                    except subprocess.CalledProcessError as e:
                        raise exc.CppCompileError(cmd, e.output) from e
                else:
                    log.debug(
                        "aot_inductor dynamic library already exist: %s", output_so
                    )

                cls.cache[key] = output_so

        def wrapper_call(*args):
            # Returns the .so path plus a None per additional graph output, so
            # the result arity matches the graph's output arity.
            assert len(graph.graph_outputs) > 0
            return cls.cache[key], *(None for i in range(len(graph.graph_outputs) - 1))

        return wrapper_call
|
| 985 |
+
|
| 986 |
+
|
| 987 |
+
# Putting this fn in cpp.py (unfortunately) causes a deadlock, which is why it
# lives in codecache.py. Importing from cpp.py invokes codecache.pick_vec_isa(),
# which takes out a lock:
#   CppCodeCache.load() -> pick_vec_isa() -> valid_vec_isa_list()
#   -> VecISA.__bool__()   <-- takes out a lock
#   -> compile_file()      <-- imports cpp_prefix_path from cpp: same lock again.
@functools.lru_cache
def cpp_prefix_path():
    """Copy codegen/cpp_prefix.h into the content-addressed cache and return its path."""
    source = Path(__file__).parent / "codegen/cpp_prefix.h"
    _, cached_path = write(source.read_text(), "h")
    return cached_path
|
| 1005 |
+
|
| 1006 |
+
|
| 1007 |
+
def cpp_prefix():
    """Return the `#include` directive for the generated-kernel prefix header."""
    filename = cpp_prefix_path()
    if config.is_fbcode():
        # We need relative paths, since we bundle up
        # everything that we compile into a folder for remote compilation.
        return f'#include "{os.path.basename(filename)}"'
    else:
        # Fixed: this branch returned the literal string `#include "(unknown)"`
        # (a leaked placeholder) instead of the actual cached header path,
        # which would make every generated kernel fail to compile.
        return f'#include "{filename}"'
|
| 1015 |
+
|
| 1016 |
+
|
| 1017 |
+
# Given a path to an input cpp file and an output path,
# Attempts to compile the file, storing the output in "output_path"
def compile_file(input_path, output_path, cmd) -> None:
    """Compile one C++ source file using the prebuilt command line ``cmd``.

    In fbcode the build runs remotely, so every required input (the source,
    the prefix header, and torch's include tree) is first staged into a
    temporary directory; elsewhere the command is executed locally.

    Raises:
        exc.CppCompileError: if the compiler exits nonzero; on macOS a hint
            about installing OpenMP is appended when the error looks like a
            missing omp.h/libomp.
    """
    # fbcode builds reference the source by bare name inside the staging dir.
    input_file = os.path.basename(input_path) if config.is_fbcode() else input_path
    try:
        if config.is_fbcode():
            # Need to copy our header into the same folder as the sourcecode.
            header_path = cpp_prefix_path()
            header_name = os.path.basename(header_path)
            output_name = os.path.basename(output_path)
            # When we build remotely, we need to make sure to carefully copy any files
            # that are required during the compilation process into our build directly.
            # This is where all of the ATen/c10/Torch includes come from.
            torch_includes_path = os.path.join(
                torch.utils.cpp_extension._TORCH_PATH, "include"
            )
            with tempfile.TemporaryDirectory() as tmp_dir:
                # Copy everything to tmp compilation folder
                shutil.copy(header_path, os.path.join(tmp_dir, header_name))
                shutil.copy(input_path, os.path.join(tmp_dir, input_file))
                dest_include_path = os.path.join(tmp_dir, "include")
                shutil.copytree(torch_includes_path, dest_include_path)
                # Run the build
                output_file_path = _run_build_command(cmd, tmp_dir, output_name)
                # Copy output from the build back to the requested destination,
                # replacing any stale artifact.
                if os.path.exists(output_path):
                    os.remove(output_path)
                shutil.copy(output_file_path, output_path)
        else:
            # Local build: capture stderr so compile errors can be re-raised
            # with the full compiler output attached.
            subprocess.check_output(cmd, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        output = e.output.decode("utf-8")
        # Heuristic: detect the common macOS failure of Apple clang lacking
        # OpenMP support and append actionable remediation steps.
        openmp_problem = "'omp.h' file not found" in output or "libomp" in output
        if openmp_problem and sys.platform == "darwin":
            instruction = (
                "\n\nOpenMP support not found. Please try one of the following solutions:\n"
                "(1) Set the `CXX` environment variable to a compiler other than Apple clang++/g++ "
                "that has builtin OpenMP support;\n"
                "(2) install OpenMP via conda: `conda install llvm-openmp`;\n"
                "(3) install libomp via brew: `brew install libomp`;\n"
                "(4) manually setup OpenMP and set the `OMP_PREFIX` environment variable to point to a path"
                " with `include/omp.h` under it."
            )
            output += instruction
        raise exc.CppCompileError(cmd, output) from e
|
| 1062 |
+
|
| 1063 |
+
|
| 1064 |
+
class CppCodeCache:
    """Disk-backed cache of compiled C++ kernels, loaded via ctypes."""

    cache = dict()
    clear = staticmethod(cache.clear)

    @staticmethod
    def _load_library(path):
        """dlopen the shared object at ``path``.

        Handles two known failure modes: a missing libgomp under fbcode/buck
        (loaded explicitly as a workaround) and shared objects placed on a
        noexec-mounted temp directory (re-raised with a helpful message).
        """
        try:
            return cdll.LoadLibrary(path)
        except OSError as e:
            reason = str(e)
            if "gomp" in reason and os.path.exists("/usr/lib64/libgomp.so.1"):
                # hacky workaround for fbcode/buck
                global _libgomp
                _libgomp = cdll.LoadLibrary("/usr/lib64/libgomp.so.1")
                return cdll.LoadLibrary(path)
            if "failed to map segment from shared object" in reason:
                raise OSError(
                    f"{e}. The most common reason this may occur is if the {tempfile.gettempdir()} folder "
                    "is mounted with noexec (e.g., by default Docker mounts tmp file systems "
                    f"as noexec). Please remount {tempfile.gettempdir()} with exec enabled, or set another "
                    "temporary directory with TORCHINDUCTOR_CACHE_DIR environment variable."
                ) from e
            raise

    @classmethod
    def load(cls, source_code):
        """Compile ``source_code`` (unless the .so already exists) and return
        the loaded shared library; memoized by content hash."""
        vec_isa = pick_vec_isa()
        # The compile command participates in the cache key so that flag
        # changes invalidate stale binaries.
        command_repr = repr(cpp_compile_command("i", "o", vec_isa=vec_isa))
        key, input_path = write(source_code, "cpp", extra=command_repr)
        if key not in cls.cache:
            from filelock import FileLock

            lock_path = os.path.join(get_lock_dir(), key + ".lock")
            with FileLock(lock_path, timeout=LOCK_TIMEOUT):
                output_path = input_path[:-3] + "so"
                if not os.path.exists(output_path):
                    build_cmd = shlex.split(
                        cpp_compile_command(
                            input=input_path, output=output_path, vec_isa=vec_isa
                        )
                    )
                    compile_file(input_path, output_path, build_cmd)
                lib = cls._load_library(output_path)
                lib.key = key
                cls.cache[key] = lib

        return cls.cache[key]
|
| 1110 |
+
|
| 1111 |
+
|
| 1112 |
+
class PyCodeCache:
    """Cache of generated Python modules, keyed by source-content hash."""

    cache: Dict[Any, types.ModuleType] = dict()
    # path -> ([starting_line, ...], [<fx node stack trace>, ...]), built from
    # the `linemap` passed to load_by_key_path; see stack_frames_for_code.
    linemaps = dict()
    clear = staticmethod(cache.clear)

    @classmethod
    def write(cls, source_code, extra=""):
        """Persist ``source_code`` to the cache dir; returns (key, path)
        without importing it."""
        return write(source_code, "py", extra=extra)

    @classmethod
    def load(cls, source_code, extra="", linemap=()):
        """Write ``source_code`` to disk (if new) and import it as a module."""
        key, path = write(source_code, "py", extra=extra)
        return cls.load_by_key_path(key, path, linemap)

    @classmethod
    def load_by_key_path(cls, key, path, linemap=()):
        """Import the file at ``path`` as a fresh module registered under ``key``.

        Raises:
            RuntimeError: if the file fails to compile; the original error is
                attached as ``__cause__``.
        """
        if key not in cls.cache:
            with open(path) as f:
                try:
                    code = compile(f.read(), path, "exec")
                except Exception as e:
                    # Chain the original exception so the underlying syntax
                    # problem remains visible in the traceback (PEP 3134);
                    # previously the cause was silently dropped.
                    raise RuntimeError(
                        f"Failed to import {path}\n{type(e).__name__}: {e}"
                    ) from e
                mod = types.ModuleType(f"{__name__}.{key}")
                mod.__file__ = path
                mod.key = key
                exec(code, mod.__dict__, mod.__dict__)
                sys.modules[mod.__name__] = mod
                # another thread might set this first
                cls.cache.setdefault(key, mod)
                # unzip into separate lines/nodes lists
                cls.linemaps[path] = list(zip(*linemap))

        return cls.cache[key]

    @classmethod
    @functools.lru_cache(None)
    def stack_frames_for_code(cls, path, lineno):
        """Map a generated-code line back to the originating fx stack frames,
        or None if no linemap entry covers it."""
        if path not in cls.linemaps:
            return None
        # [(starting_line, <fx node>), ...]
        lines, nodes = cls.linemaps[path]
        p = bisect_right(lines, lineno)
        if p == 0:
            return None
        entry = nodes[p - 1]
        if not entry:
            return None

        def parse_stack_trace(stack_trace):
            # ideally fx stores stack traces as data rather than a string
            # but this is not along a performance critical path
            regex = r'File "(.+)", line (\d+), in (.+)\n'
            matches = re.findall(regex, stack_trace)
            return [
                {"filename": f, "line": int(l), "name": n}
                for f, l, n in reversed(matches)
            ]

        return parse_stack_trace(entry)
|
| 1173 |
+
|
| 1174 |
+
|
| 1175 |
+
class CppWrapperCodeCache:
    """Cache of compiled cpp-wrapper extension modules, keyed by ``key``."""

    # key -> imported extension module
    cache = dict()
    clear = staticmethod(cache.clear)

    @classmethod
    def load(cls, source_code, func_name, key, cuda):
        """Build (or reuse) a cpp_extension module exposing ``func_name``.

        On a cache miss the extension is compiled with
        ``torch.utils.cpp_extension.load_inline`` under a file lock; if the
        target .so already exists on disk it is imported directly instead of
        rebuilding.
        """
        name = f"inline_extension_{key}"
        cpp_wrapper_dir = cpp_wrapper_cache_dir(name)
        if not os.path.exists(cpp_wrapper_dir):
            os.makedirs(cpp_wrapper_dir)

        ext = "so"
        filepath = os.path.join(cpp_wrapper_dir, f"{name}.{ext}")
        log.debug("Cpp wrapper code path %s", filepath)

        if key not in cls.cache:
            log.debug("Cpp wrapper cache miss for %s", filepath)
            from filelock import FileLock

            # Serialize concurrent builds of the same key across processes.
            lock_dir = get_lock_dir()
            lock = FileLock(os.path.join(lock_dir, key + ".lock"), timeout=LOCK_TIMEOUT)
            with lock:
                if not os.path.exists(filepath):
                    log.debug("Cpp wrapper building %s", filepath)

                    _cpp_flags = cpp_flags()
                    _opt_flags = optimization_flags()
                    _shared = get_shared()
                    _warning_all_flag = get_warning_all_flag()
                    _ipaths, _lpaths, _libs, _macros = get_include_and_linking_paths(
                        vec_isa=pick_vec_isa(),
                        cuda=cuda,
                    )
                    _use_custom_generated_macros = use_custom_generated_macros()
                    _cpp_wrapper_flags = cpp_wrapper_flags()

                    extra_cflags = f"{_cpp_flags} {_opt_flags} {_warning_all_flag} {_macros} {_cpp_wrapper_flags} \
                        {_use_custom_generated_macros}"
                    # For CPP wrapper, add -ffast-math during linking to make CPU flush denormals.
                    # CPP wrapper leverages cpp_extension which will do the compilation and linking in two stages.
                    # We need to explicitly add -ffast-math as a linking flag.
                    # For the default python wrapper, the compilation and linking are done in one command thus -ffast-math
                    # will take effect in both compilation and linking.
                    extra_ldflags = f"{_shared} {_lpaths} {_libs} -ffast-math"
                    extra_include_paths = f"{_ipaths}"

                    mod = torch.utils.cpp_extension.load_inline(
                        name=name,
                        build_directory=cpp_wrapper_dir,
                        cpp_sources=[source_code],
                        functions=[func_name],
                        extra_cflags=[extra_cflags],
                        extra_ldflags=[extra_ldflags],
                        extra_include_paths=[extra_include_paths],
                        use_pch=True,
                    )
                    log.debug("Cpp wrapper done building %s", filepath)
                else:
                    # Another process (or a prior run) already produced the
                    # shared object: import it without recompiling.
                    log.debug("Found target .so, cpp wrapper loading %s", filepath)
                    spec = importlib.util.spec_from_file_location(name, filepath)
                    assert spec is not None
                    mod = importlib.util.module_from_spec(spec)
                    assert isinstance(spec.loader, abc.Loader)
                    spec.loader.exec_module(mod)
                    log.debug("Cpp wrapper done loading %s", filepath)

            cls.cache[key] = mod

        return cls.cache[key]
|
| 1244 |
+
|
| 1245 |
+
|
| 1246 |
+
class TritonCodeCache:
    """Resolves a named Triton kernel from generated Python source."""

    @classmethod
    def load(cls, kernel_name, source_code):
        """Import ``source_code`` via PyCodeCache and return its
        ``kernel_name`` attribute."""
        module = PyCodeCache.load(source_code)
        return getattr(module, kernel_name)
|
| 1251 |
+
|
| 1252 |
+
|
| 1253 |
+
def _worker_compile(kernel_name, source_code, cc, device):
    """Entry point run in a compile-worker process: bind the worker to
    ``device`` and warm the compile cache for compute capability ``cc``."""
    cuda_properties.set_compiler_worker_current_device(device)
    TritonCodeCache.load(kernel_name, source_code).precompile(
        warm_cache_only_with_cc=cc
    )
|
| 1257 |
+
|
| 1258 |
+
|
| 1259 |
+
def _load_kernel(kernel_name, source_code):
    """Load a Triton kernel in the current process and precompile it eagerly."""
    loaded = TritonCodeCache.load(kernel_name, source_code)
    loaded.precompile()
    return loaded
|
| 1263 |
+
|
| 1264 |
+
|
| 1265 |
+
class TritonFuture:
    """Handle to a kernel compiling in a worker process.

    ``result()`` blocks on the worker, loads the kernel locally, caches it on
    the instance, and drops the inputs so the source text can be collected.
    """

    def __init__(self, kernel_name, source_code, future):
        self.kernel_name = kernel_name
        self.source_code = source_code
        self.future = future

    def result(self):
        """Return the compiled kernel, waiting for the worker if necessary."""
        start = time()
        if hasattr(self, "kernel"):
            return self.kernel
        # If the worker failed this will throw an exception.
        self.future.result()
        kernel = self.kernel = _load_kernel(self.kernel_name, self.source_code)
        elapsed = time() - start
        if elapsed > 50:
            developer_warning(
                f"Detected long compilation time of {elapsed} seconds for kernel name {self.kernel_name}"
            )
            developer_warning(self.source_code)
        del self.kernel_name, self.source_code, self.future
        return kernel
|
| 1287 |
+
|
| 1288 |
+
|
| 1289 |
+
class AsyncCompile:
    """Dispatches kernel compilation to thread/process pools (or runs inline
    when config.compile_threads <= 1)."""

    def __init__(self):
        pass

    @staticmethod
    @functools.lru_cache(1)
    def pool():
        """Singleton thread pool used for C++ kernel compilation tasks."""
        assert config.compile_threads > 1
        return ThreadPoolExecutor(config.compile_threads)

    @staticmethod
    @functools.lru_cache(1)
    def process_pool():
        """Singleton fork-based process pool for Triton compile workers."""
        # ensure properties have been calculated before processes
        # are forked
        cuda_properties._properties()
        assert config.compile_threads > 1
        orig_ppid = os.getpid()

        # if this process dies abnormally (e.g. segfault)
        # it will not shut down the workers. Instead
        # the workers will have their parent reassigned to the
        # init process. This launches a separate thread to
        # watch for the worker getting reassigned,
        # and cleans it up in this case.
        def init():
            def run():
                while True:
                    sleep(1)
                    if orig_ppid != os.getppid():
                        os.kill(os.getpid(), signal.SIGKILL)

            global _watchdog_thread
            _watchdog_thread = Thread(target=run, daemon=True)
            _watchdog_thread.start()

        # we rely on 'fork' because we cannot control whether users
        # have an `if __name__ == '__main__'` in their main process.
        fork_context = multiprocessing.get_context("fork")
        pool = ProcessPoolExecutor(
            config.compile_threads, mp_context=fork_context, initializer=init
        )
        # when this pool is created in a subprocess object, the normal exit handler
        # doesn't run, and we need to register our own handler.
        # exitpriority has to be high, because another one of the finalizers will
        # kill the worker thread that sends the shutdown message to the workers...
        multiprocessing.util.Finalize(None, pool.shutdown, exitpriority=sys.maxsize)
        return pool

    @classmethod
    def warm_pool(cls):
        """Force worker processes to start early, while forking is cheap."""
        if config.compile_threads <= 1:
            return
        _compile_start()
        pool = cls.process_pool()

        # We have to fork processes for compiler workers, but the more memory and other resources that are loaded, the
        # slower the os.fork time is, quite drastically. It also holds the GIL so we can't put it on another thread.

        # Examples:
        # A simple x + x + x script: 10ms seconds in the middle of the program, 2ms at startup
        # tf_efficientnet_b0 benchmark: 50ms! in the middle of the program , 3ms at startup

        # So we want to start the workers early when it is still cheap, and also to allow the workers to get
        # ready before we have work for them.

        # ProcessPoolExecutor also does not launch the workers until it finds a point when all the workers are idle.
        # But if we waited until then fork time will be long and we will be waiting for the processes to initialize.

        # We force them to start here with some YOLOing of the internal methods.
        # NOTE(review): relies on ProcessPoolExecutor internals that differ by
        # Python version — hence the hasattr branch.
        if hasattr(pool, "_start_queue_management_thread"):
            pool._start_queue_management_thread()
        else:
            for _ in range(config.compile_threads):
                pool._adjust_process_count()
            pool._start_executor_manager_thread()
        _compile_end()

    @classmethod
    def submit(cls, task):
        """Run ``task`` inline (single-threaded config) or on the thread pool."""
        if config.compile_threads <= 1:
            return task()
        return cls.pool().submit(task)

    @classmethod
    def map(cls, fn, seq):
        """Apply ``fn`` over ``seq``, fanning out to the thread pool when
        parallelism is enabled and worthwhile."""
        if config.compile_threads <= 1 or len(seq) <= 1:
            return list(map(fn, seq))
        return [t.result() for t in [cls.pool().submit(fn, x) for x in seq]]

    def triton(self, kernel_name, source_code):
        """Compile a Triton kernel: async via the process pool (returning a
        TritonFuture) or synchronously when parallelism is disabled."""
        _compile_start()

        if config.compile_threads > 1:
            major, minor = torch.cuda.get_device_capability()
            device = torch.cuda.current_device()
            cc = major * 10 + minor
            future = self.process_pool().submit(
                _worker_compile, kernel_name, source_code, cc, device
            )
            return TritonFuture(kernel_name, source_code, future)
        else:
            return _load_kernel(kernel_name, source_code)

    def cpp(self, source_code):
        """Submit C++ kernel compilation; returns the task result or a Future."""
        def task():
            return CppCodeCache.load(source_code).kernel

        return self.submit(task)

    def wait(self, scope: Dict[str, Any]):
        """Resolve every Future/TritonFuture value in ``scope`` in place,
        showing a progress bar."""
        num_kernels = len(
            [
                value
                for key, value in scope.items()
                if isinstance(value, (Future, TritonFuture))
            ]
        )
        pbar = tqdm(
            total=num_kernels,
            desc="Inductor Compilation",
            disable=config.disable_progress,
            delay=0,
        )
        if config.compile_threads > 1:
            for key, result in scope.items():
                if config.verbose_progress and not isinstance(pbar, _Faketqdm):
                    pbar.set_postfix_str(key)
                if isinstance(result, (Future, TritonFuture)):
                    scope[key] = result.result()
                    pbar.update(1)

        _compile_end()
|
| 1422 |
+
|
| 1423 |
+
|
| 1424 |
+
# Eagerly fork the compile-worker processes at import time, while the parent
# process is still small — forking later is far more expensive (see the
# comments inside AsyncCompile.warm_pool).
AsyncCompile.warm_pool()
|
llava_next/lib/python3.10/site-packages/torch/_inductor/compile_fx.py
ADDED
|
@@ -0,0 +1,1284 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import contextlib
|
| 2 |
+
import dataclasses
|
| 3 |
+
import functools
|
| 4 |
+
import itertools
|
| 5 |
+
import logging
|
| 6 |
+
import sys
|
| 7 |
+
import warnings
|
| 8 |
+
|
| 9 |
+
from functools import wraps
|
| 10 |
+
from typing import Any, Callable, Dict, FrozenSet, List, Optional, Sequence, Union
|
| 11 |
+
from unittest import mock
|
| 12 |
+
|
| 13 |
+
from functorch.compile import min_cut_rematerialization_partition
|
| 14 |
+
|
| 15 |
+
import torch._functorch.config as functorch_config
|
| 16 |
+
|
| 17 |
+
import torch.fx
|
| 18 |
+
import torch.utils._pytree as pytree
|
| 19 |
+
from torch._dynamo import (
|
| 20 |
+
compiled_autograd,
|
| 21 |
+
logging as dynamo_logging,
|
| 22 |
+
utils as dynamo_utils,
|
| 23 |
+
)
|
| 24 |
+
from torch._dynamo.utils import detect_fake_mode
|
| 25 |
+
from torch._functorch.aot_autograd import make_boxed_func
|
| 26 |
+
from torch._inductor.codecache import code_hash, CompiledFxGraph
|
| 27 |
+
|
| 28 |
+
from torch._inductor.debug import save_args_for_compile_fx_inner
|
| 29 |
+
from torch._ops import OpOverload
|
| 30 |
+
from torch._subclasses.fake_tensor import FakeTensor
|
| 31 |
+
from torch.fx.passes.fake_tensor_prop import FakeTensorProp
|
| 32 |
+
|
| 33 |
+
from .._dynamo.backends.common import aot_autograd
|
| 34 |
+
from ..fx.graph import _PyTreeCodeGen
|
| 35 |
+
from . import config, metrics
|
| 36 |
+
from .debug import DebugContext
|
| 37 |
+
from .decomposition import select_decomp_table
|
| 38 |
+
from .fx_passes.joint_graph import joint_graph_passes
|
| 39 |
+
from .fx_passes.post_grad import post_grad_passes, view_to_reshape
|
| 40 |
+
from .fx_passes.pre_grad import pre_grad_passes
|
| 41 |
+
from .graph import GraphLowering
|
| 42 |
+
from .pattern_matcher import clone_graph
|
| 43 |
+
from .utils import get_dtype_size, has_incompatible_cudagraph_ops
|
| 44 |
+
from .virtualized import V
|
| 45 |
+
|
| 46 |
+
if config.is_fbcode():
    from torch._inductor.fb.utils import time_and_log  # type: ignore[import]
else:
    # Outside fbcode there is no timing/logging infrastructure, so substitute
    # a decorator factory with the same signature that leaves the wrapped
    # function's behavior untouched.
    def time_and_log(attr: str):
        def wrap(old_func):
            @wraps(old_func)
            def passthrough(*args, **kwargs):
                return old_func(*args, **kwargs)

            return passthrough

        return wrap
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
# Module-level logger for compile_fx.
log = logging.getLogger(__name__)
# Artifact logger that emits performance hints when the "perf_hints" logging
# artifact is enabled.
perf_hint_log = torch._logging.getArtifactLogger(__name__, "perf_hints")
# presumably the required buffer alignment in bytes — its uses are outside
# this chunk; verify against callers before relying on this description.
ALIGNMENT = 16
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
@dataclasses.dataclass
class BoxedBool:
    """A mutable boolean box, letting callees flip a flag seen by the caller."""

    value: bool

    def __bool__(self):
        return self.value

    @staticmethod
    def disable(obj):
        """Clear a BoxedBool in place and return it; for any other value,
        return a plain ``False``."""
        if not isinstance(obj, BoxedBool):
            return False
        obj.value = False
        return obj
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
@dataclasses.dataclass
class BoxedDeviceIndex:
    """A mutable box holding an optional device index."""

    value: Optional[int]

    def set(self, device_idx):
        # Only None or a plain int is accepted.
        assert isinstance(device_idx, int) or device_idx is None
        self.value = device_idx
|
| 89 |
+
|
| 90 |
+
# copy_ fails when trying to write to tensors with memory overlap,
# for expanded dimensions (a dimension which used to have size 1 -> ?)
# we can select one element from that dimension and write to it
# to achieve writing to all values of that dimension of the input tensor
def get_expanded_dims(t):
    """Return the dims of ``t`` with stride 0 and size != 1 (i.e. expanded),
    or None for non-tensors."""
    if not isinstance(t, torch.Tensor):
        return None
    return [dim for dim in range(t.ndim) if t.stride(dim) == 0 and t.size(dim) != 1]
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
def index_expanded_dims(t: torch.Tensor, expanded_dims: List[int]) -> torch.Tensor:
    """Narrow each dim in ``expanded_dims`` to its first element via
    aten.slice, collapsing zero-stride expansions to a single value."""
    return functools.reduce(
        lambda acc, dim: torch.ops.aten.slice(acc, dim, 0, 1), expanded_dims, t
    )
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
def complex_memory_overlap(t: torch.Tensor) -> bool:
    """Return True when ``t`` genuinely has internal memory overlap.

    torch._debug_has_internal_overlap is only a quick heuristic; when it
    reports a possible overlap we check the stride/size layout explicitly
    (after collapsing expanded dims, which are benign here).
    """
    t = index_expanded_dims(t, get_expanded_dims(t))
    if torch._debug_has_internal_overlap(t) == 0:
        return False
    strides = t.stride()
    sizes = t.shape
    # Visit dims from smallest stride to largest (stable sort keeps the
    # ascending-index order for equal strides, matching a (stride, index)
    # pairwise sort).
    order = sorted(range(len(strides)), key=lambda d: strides[d])
    prev_stride = prev_size = 1
    for d in order:
        # A dim whose stride is smaller than the extent of the previous dim
        # re-addresses the same storage elements.
        if strides[d] < prev_stride * prev_size:
            return True
        prev_stride, prev_size = strides[d], sizes[d]
    return False
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
@functools.lru_cache(None)
def _step_logger():
    # Cached accessor for dynamo's step logger, bound to this module's logger.
    return dynamo_logging.get_step_logger(log)
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
@functools.lru_cache(None)
|
| 129 |
+
def _warn_tf32_disabled():
|
| 130 |
+
if (
|
| 131 |
+
torch.cuda.is_available()
|
| 132 |
+
and not torch.backends.cuda.matmul.allow_tf32
|
| 133 |
+
and torch.cuda.get_device_capability() >= (8, 0)
|
| 134 |
+
):
|
| 135 |
+
warnings.warn(
|
| 136 |
+
"TensorFloat32 tensor cores for float32 matrix multiplication available but not enabled. "
|
| 137 |
+
"Consider setting `torch.set_float32_matmul_precision('high')` for better performance."
|
| 138 |
+
)
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
def is_tf32_warning_applicable(gm: torch.fx.GraphModule):
    """Return True if ``gm`` contains a float32 CUDA matmul-family op, i.e.
    an op that would benefit from enabling TF32."""
    aten = torch.ops.aten
    tf32_ops = {
        aten.mm.default,
        aten.addmm.default,
        aten.bmm.default,
        aten.baddbmm.default,
    }

    def _is_fp32_cuda_matmul(node):
        val = node.meta.get("val", None)
        return (
            node.op == "call_function"
            and node.target in tf32_ops
            and isinstance(val, torch.Tensor)
            and val.dtype == torch.float32
            and val.device.type == "cuda"
        )

    return any(_is_fp32_cuda_matmul(node) for node in gm.graph.nodes)
|
| 159 |
+
|
| 160 |
+
|
| 161 |
+
@DebugContext.wrap
def count_bytes_inner(
    gm: torch.fx.GraphModule,
    example_inputs: List[torch.Tensor],
    num_fixed: int = 0,
    **kwargs,
):
    """Lower ``gm`` through GraphLowering and accumulate its byte/element/
    runtime counts into the global ``metrics``; compilation is not performed —
    ``gm.forward`` itself is returned as a boxed callable."""
    shape_env = _shape_env_from_inputs(example_inputs)

    graph = GraphLowering(gm, shape_env=shape_env, num_static_inputs=num_fixed)
    with V.set_graph_handler(graph), V.set_real_inputs(example_inputs):  # type: ignore[call-arg]
        graph.run(*example_inputs)
        # Tally this graph's stats into the process-wide metrics counters.
        num_bytes, nodes_num_elem, node_runtimes = graph.count_bytes()
        metrics.num_bytes_accessed += num_bytes
        metrics.nodes_num_elem += nodes_num_elem
        metrics.node_runtimes += node_runtimes
    return make_boxed_func(gm.forward)
|
| 178 |
+
|
| 179 |
+
|
| 180 |
+
def inner_compile_with_cpp_wrapper(inner_compile: Callable[..., Any]):
    """Wrap ``inner_compile`` so that the graph is compiled with C++ wrapper code.

    Returns a drop-in replacement for ``inner_compile`` with the same signature.
    """

    @functools.wraps(inner_compile)
    def wrapper(gm: torch.fx.GraphModule, example_inputs: List[torch.Tensor], **kwargs):
        """
        Compile into cpp wrapper:
        For CPU, this is currently done in one pass.
        For GPU, this is done in two passes: JIT-compile the model with python wrapper code
        and run it to generate autotuned kernel binaries in the first pass; and then generate
        cpp wrapper code and compile it to a dynamic library in the second pass.
        """
        # Collect every device type touched by params, buffers, and inputs to
        # decide between the one-pass (CPU) and two-pass (CUDA) strategies.
        devices = (
            {t.device.type for t in gm.parameters()}
            | {t.device.type for t in gm.buffers()}
            | {t.device.type for t in example_inputs if isinstance(t, torch.Tensor)}
        )

        if "cuda" not in devices:
            # CPU-only: a single pass with cpp_wrapper enabled suffices.
            kwargs_patched = {**kwargs, "cpp_wrapper": True}
            return inner_compile(gm, example_inputs, **kwargs_patched)
        else:
            # store_cubin makes the first (python-wrapper) pass persist the
            # autotuned kernel binaries for the second (cpp-wrapper) pass.
            with config.patch(  # type: ignore[attr-defined]
                {
                    "triton.store_cubin": True,
                }
            ):
                # first pass with regular python wrapper code
                kwargs_patched = {
                    **kwargs,
                    "cpp_wrapper": False,
                }
                # clone_graph(gm) makes sure no graph modification from the first pass will
                # leak to the second pass. It does increase memory pressure, but the problem
                # can be alleviated once we have parameters as FakeTensor.

                compiled = inner_compile(
                    clone_graph(gm), example_inputs, **kwargs_patched
                )

                def materialize(x):
                    # Symbolic scalars must be replaced by their concrete hints
                    # so the compiled artifact can actually be executed.
                    if isinstance(x, (torch.SymInt, torch.SymFloat)):
                        # Need concrete value to run dynamic shapes and tune the result
                        return x.node.hint
                    else:
                        assert not isinstance(x, FakeTensor)
                        return x

                tracing_context = torch._guards.TracingContext.get()
                if tracing_context:
                    if tracing_context.output_strides:
                        # The warm-up run below would append strides again;
                        # clear so the second pass starts fresh.
                        tracing_context.output_strides.clear()

                    params_flat = [
                        param
                        for param in tracing_context.params_flat  # type: ignore[union-attr]
                        if param is not None
                    ]
                    # Real runtime inputs = flattened params followed by the
                    # real inputs stashed on V by compile_fx's cpp_wrapper branch.
                    real_inputs = [
                        materialize(x) for x in (params_flat + V.real_inputs)
                    ]
                else:
                    real_inputs = [materialize(x) for x in V.real_inputs]

                # Run outside any dispatch modes (e.g. fake tensor mode) so the
                # warm-up executes the real kernels and triggers autotuning.
                with torch.utils._python_dispatch._disable_current_modes():
                    compiled(real_inputs)

                del real_inputs

            # second pass
            kwargs_patched = {**kwargs, "cpp_wrapper": True}
            return inner_compile(gm, example_inputs, **kwargs_patched)

    return wrapper
|
| 252 |
+
|
| 253 |
+
|
| 254 |
+
def fake_tensor_prop(
    gm: torch.fx.GraphModule,
    example_inputs: List[torch.Tensor],
    force_allow_non_fake_inputs: bool = False,
):
    """Run fake-tensor propagation over ``gm`` and return the fake mode used.

    If a fake mode can be detected from ``example_inputs`` it is reused and the
    inputs are propagated without conversion; otherwise a fresh
    ``FakeTensorMode(allow_non_fake_inputs=True)`` is created.  When
    ``force_allow_non_fake_inputs`` is set, the detected mode is temporarily
    patched to accept non-fake inputs for the duration of the propagation.
    """
    fake_mode = detect_fake_mode(example_inputs)
    if fake_mode:
        allow_ctx = (
            mock.patch.object(fake_mode, "allow_non_fake_inputs", True)
            if force_allow_non_fake_inputs
            else contextlib.nullcontext()
        )
        with allow_ctx:  # type: ignore[attr-defined]
            FakeTensorProp(gm, mode=fake_mode).propagate_dont_convert_inputs(
                *example_inputs
            )
    else:
        fake_mode = torch._subclasses.FakeTensorMode(allow_non_fake_inputs=True)
        FakeTensorProp(gm, mode=fake_mode).propagate(*example_inputs)

    return fake_mode
|
| 280 |
+
|
| 281 |
+
|
| 282 |
+
@DebugContext.wrap
@torch.utils._python_dispatch._disable_current_modes()
@time_and_log(attr="compilation time (in seconds)")
def compile_fx_inner(
    gm: torch.fx.GraphModule,
    example_inputs: List[torch.Tensor],
    cudagraphs: Optional[BoxedBool] = None,
    num_fixed: int = 0,
    is_backward: bool = False,
    graph_id: Optional[int] = None,
    cpp_wrapper: bool = False,
    aot_mode: bool = False,
    is_inference: bool = False,
    boxed_forward_device_index: Optional[BoxedDeviceIndex] = None,
    user_visible_outputs: FrozenSet[str] = frozenset(),
    layout_opt: Optional[bool] = None,
):
    """
    Inductor API that compiles a single graph.

    Lowers/compiles ``gm`` via ``fx_codegen_and_compile`` and then, when
    eligible, wraps the result in CUDA graphs (``cudagraphify``) or input
    re-alignment (``align_inputs``).  Returns the ``CompiledFxGraph`` (boxed
    calling convention), or a boxed ``gm.forward`` for call-free graphs.

    If you change the argument list for this function, make sure you
    also update the call to save_args_for_compile_fx_inner below accordingly.
    """
    # Graphs with no call nodes need no compilation; run eager forward.
    if dynamo_utils.count_calls(gm.graph) == 0:
        return make_boxed_func(gm.forward)

    if config.save_args:
        save_args_for_compile_fx_inner(
            gm,
            example_inputs,
            cudagraphs=cudagraphs,
            num_fixed=num_fixed,
            is_backward=is_backward,
            graph_id=graph_id,
            cpp_wrapper=cpp_wrapper,
            aot_mode=aot_mode,
            is_inference=is_inference,
            boxed_forward_device_index=boxed_forward_device_index,
            user_visible_outputs=user_visible_outputs,
            layout_opt=layout_opt,
        )

    # BoxedBool so callees (fx_codegen_and_compile) can disable cudagraphs
    # and the caller observes the change.
    if cudagraphs is None:
        cudagraphs = BoxedBool(config.triton.cudagraphs)

    # Inputs to fx_codegen_and_compile
    graph_args = [gm, example_inputs]
    graph_kwargs = {
        "cudagraphs": cudagraphs,
        "num_fixed": num_fixed,
        "is_backward": is_backward,
        "graph_id": graph_id,
        "cpp_wrapper": cpp_wrapper,
        "aot_mode": aot_mode,
        "is_inference": is_inference,
        "user_visible_outputs": user_visible_outputs,
        "layout_opt": layout_opt,
    }

    compiled_graph: CompiledFxGraph = fx_codegen_and_compile(
        *graph_args, **graph_kwargs  # type: ignore[arg-type]
    )

    if aot_mode:
        return compiled_graph

    if cudagraphs:
        # output args are tuple of first argument
        output = list(gm.graph.nodes)[-1]
        assert len(output.args) == 1
        stack_traces = [
            (arg.stack_trace if isinstance(arg, torch.fx.node.Node) else None)
            for arg in output.args[0]
        ]

        complex_memory_overlap_inputs = any(
            complex_memory_overlap(t)
            for t in example_inputs
            if isinstance(t, torch.Tensor)
        )

        # doesn't work for non-trees because the warmup run would apply mutation twice
        if config.triton.cudagraph_trees:
            # checking if mutation is only on parameters/static inputs
            has_mutation = not all(
                idx < num_fixed for idx in compiled_graph.mutated_input_idxs
            )
        else:
            has_mutation = len(compiled_graph.mutated_inputs) != 0

        # Each entry: (condition that must hold, reason logged if it doesn't).
        cudagraph_tests = [
            (set(compiled_graph.device_types) == {"cuda"}, "non-cuda device in graph"),
            (not has_mutation, "mutated inputs"),
            (not has_incompatible_cudagraph_ops(gm), "incompatible ops"),
            (not complex_memory_overlap_inputs, "complex memory overlap"),
            (
                all(
                    isinstance(t, (torch.Tensor, torch.SymInt)) for t in example_inputs
                ),
                "non-Tensor inputs",
            ),
            (
                (
                    len(compiled_graph.device_idxs) == 1
                    or not config.triton.cudagraph_trees
                ),
                "multiple device indices without cudagraph_trees",
            ),
        ]
        cudagraph_fail_reasons = [s for b, s in cudagraph_tests if not b]

        if not cudagraph_fail_reasons:
            if not config.triton.cudagraph_trees:
                # Force specialize all inputs so that CUDA graphs will work
                for t in example_inputs:
                    if isinstance(t, torch.SymInt):
                        int(t)  # guard

            # Record the forward's device so the backward pass can find the
            # same cudagraph manager later (see [Backward Generation Handling]).
            if (
                boxed_forward_device_index is not None
                and not is_inference
                and not is_backward
            ):
                boxed_forward_device_index.set(next(iter(compiled_graph.device_idxs)))

            compiled_graph.current_callable = cudagraphify(
                compiled_graph.get_current_callable(),
                example_inputs,
                static_input_idxs=range(num_fixed),
                device_index=next(iter(compiled_graph.device_idxs)),
                stack_traces=stack_traces,
                is_backward=is_backward,
                is_inference=is_inference,
            )
        else:
            BoxedBool.disable(cudagraphs)

            # See [Backward Generation Handling]
            # if cudagraph'd the forward and set the device, we need to let the cudagraph manager
            # know we are running the backward even if we will not run it in cudagraphs
            if is_backward and config.triton.cudagraph_trees:
                assert boxed_forward_device_index is not None
                assert boxed_forward_device_index.value is not None
                compiled_graph_callable = compiled_graph.get_current_callable()

                manager = torch._inductor.cudagraph_trees.get_manager(
                    boxed_forward_device_index.value, create_if_none_exists=False
                )
                # should already exist from forward
                assert manager is not None

                def compiled_artifact(new_inputs):
                    manager.set_to_running_backward()
                    return compiled_graph_callable(new_inputs)

                compiled_graph.current_callable = compiled_artifact

            # Log the most specific fail reason we can identify as a perf hint.
            if len(set(compiled_graph.device_types)) > 1:
                perf_hint_log.warning("skipping cudagraphs due to multiple devices")
            elif set(compiled_graph.device_types) == {"cuda"}:
                if has_mutation:
                    perf_hint_log.warning("skipping cudagraphs due to input mutation")
                elif complex_memory_overlap_inputs:
                    perf_hint_log.warning(
                        "skipping cudagraphs due to complex input striding"
                    )
                elif (
                    len(compiled_graph.device_idxs) > 1
                    and config.triton.cudagraph_trees
                ):
                    perf_hint_log.warning(
                        "skipping cudagraphs due to multiple device indexes"
                    )
                else:
                    perf_hint_log.warning("skipping cudagraphs for unknown reason")
            else:
                perf_hint_log.warning("skipping cudagraphs for unknown reason")

    # cudagraphs does its own aligning of inputs
    if not cudagraphs:
        new_callable = align_inputs(
            compiled_graph.get_current_callable(), example_inputs, range(num_fixed)
        )
        if new_callable is not compiled_graph.get_current_callable():
            compiled_graph.current_callable = new_callable

    _step_logger()(
        logging.INFO,
        "torchinductor done compiling "
        f"{'BACKWARDS' if is_backward else 'FORWARDS'} "
        f"graph {graph_id}",
    )

    # aot autograd needs to know to pass in inputs as a list
    compiled_graph._boxed_call = True
    return compiled_graph
|
| 478 |
+
|
| 479 |
+
|
| 480 |
+
def fx_codegen_and_compile(
    gm: torch.fx.GraphModule,
    example_inputs: List[torch.Tensor],
    cudagraphs: Optional[BoxedBool] = None,
    num_fixed: int = 0,
    is_backward: bool = False,
    graph_id: Optional[int] = None,
    cpp_wrapper: bool = False,
    aot_mode: bool = False,
    is_inference: bool = False,
    user_visible_outputs: FrozenSet[str] = frozenset(),
    layout_opt: Optional[bool] = None,
) -> CompiledFxGraph:
    """Run post-grad passes, lower ``gm`` with ``GraphLowering``, and compile
    it into a ``CompiledFxGraph``.

    Side effects: may mutate ``gm`` (view->reshape rewrite, post-grad passes),
    may disable ``cudagraphs`` (BoxedBool) if the lowered graph requires it,
    and publishes output strides via the active ``TracingContext``.
    """
    if is_tf32_warning_applicable(gm):
        _warn_tf32_disabled()

    # lift the maximum depth of the Python interpreter stack
    # to adapt large/deep models
    sys.setrecursionlimit(max(sys.getrecursionlimit(), 2000))

    _step_logger()(
        logging.INFO,
        "torchinductor compiling "
        f"{'BACKWARDS' if is_backward else 'FORWARDS'} "
        f"graph {graph_id}",
    )
    V.debug.fx_graph(gm, example_inputs)

    shape_env = _shape_env_from_inputs(example_inputs)

    # Convert view to reshape in the graph. This is necessary primarily for
    # layout optimization. Do it unconditionally for uniformity.
    #
    # It's needed because when we do layout optimization, a contiguous tensor
    # in eager mode may become a channels last tensor. A view op previously
    # applicable to the contiguous tensor may not be applicable on the
    # channels last tensor any more. An error like
    #   RuntimeError: view size is not compatible with input tensor's size and stride
    #   (at least one dimension spans across two contiguous subspaces). Use .reshape(...) instead.
    # will be printed.
    #
    # Replace view op to reshape op in this case.
    # As an example, timm_resnest/botnet26t_256/convnext_base etc. will fail if we don't do this.
    #
    # Also this has to be done before FakeTensorProp below to avoid the failed
    # .view() call.
    view_to_reshape(gm)

    fake_mode = fake_tensor_prop(gm, example_inputs)

    # pattern matcher passes might not preserve striding information
    # on node.meta["val"]. if in the future we rely on these being
    # correct we will need to fix.

    with V.set_fake_mode(fake_mode):  # type: ignore[call-arg]
        # has some issues with memory in training
        post_grad_passes(gm, is_inference=is_inference)
        V.debug.fx_graph_transformed(gm, example_inputs)

    with V.set_fake_mode(fake_mode):  # type: ignore[call-arg]
        graph = GraphLowering(
            gm,
            shape_env=shape_env,
            num_static_inputs=num_fixed,
            graph_id=graph_id,
            cpp_wrapper=cpp_wrapper,
            aot_mode=aot_mode,
            user_visible_outputs=user_visible_outputs,
        )
        with V.set_graph_handler(graph):  # type: ignore[call-arg]
            graph.run(*example_inputs)
            context = torch._guards.TracingContext.get()
            if context is not None and context.output_strides is not None:
                # Return the output strides to the caller via TracingContext
                assert len(context.output_strides) == 0
                assert graph.graph_outputs is not None
                for out in graph.graph_outputs:
                    if hasattr(out, "layout"):
                        context.output_strides.append(
                            tuple(  # type: ignore[arg-type]
                                V.graph.sizevars.size_hint(s) for s in out.layout.stride
                            )
                        )
                    else:
                        # Non-tensor outputs (no layout) get a None placeholder.
                        context.output_strides.append(None)
            compiled_fn = graph.compile_to_fn()

    # Lowering may decide cudagraphs are unsafe; propagate via the BoxedBool.
    if graph.disable_cudagraphs:
        BoxedBool.disable(cudagraphs)

    compiled_graph = CompiledFxGraph(
        compiled_artifact=compiled_fn,
        cache_key=graph.cache_key,
        artifact_path=graph.cache_path,
        cache_linemap=graph.cache_linemap,
        device_types=graph.device_types,
        device_idxs=graph.device_idxs,
        mutated_inputs=graph.mutated_inputs,
        mutated_input_idxs=set(graph.mutated_input_idxs),
    )
    return compiled_graph
|
| 581 |
+
|
| 582 |
+
|
| 583 |
+
def clone_preserve_strides(x: torch.Tensor):
    """Return a copy of ``x`` into fresh storage that keeps x's exact
    size/stride layout (a plain ``clone()`` would make it contiguous)."""
    # Index of the last element addressed by x's strides; +1 gives the
    # smallest flat buffer that covers all of them.
    last_offset = sum((dim - 1) * step for dim, step in zip(x.size(), x.stride()))
    flat_copy = torch.as_strided(x, (last_offset + 1,), (1,)).clone()
    return torch.as_strided(flat_copy, x.size(), x.stride())
|
| 589 |
+
|
| 590 |
+
|
| 591 |
+
def copy_misaligned_inputs(
    new_inputs: List[torch.Tensor], check_inputs_idxs: Sequence[int]
) -> None:
    """Replace (in place) every checked input whose data pointer is not a
    multiple of ALIGNMENT with an aligned, stride-preserving copy."""
    for idx in check_inputs_idxs:
        candidate = new_inputs[idx]
        if candidate.data_ptr() % ALIGNMENT != 0:
            new_inputs[idx] = clone_preserve_strides(candidate)
|
| 597 |
+
|
| 598 |
+
|
| 599 |
+
def get_input_idxs_to_check(
    inputs: Union[List[torch.Tensor], Sequence[int]],
    static_input_idxs: Sequence[int],
) -> Sequence[int]:
    """Return indices of CUDA tensor inputs whose pointer alignment must be
    re-checked at call time.

    Static inputs are assumed stable, so they are skipped — unless their
    storage offset already makes them misaligned, in which case they must be
    checked anyway.
    """

    def _offset_is_aligned(storage_offset, dtype):
        # Alignment is measured in bytes from the start of the storage.
        return (storage_offset * get_dtype_size(dtype)) % ALIGNMENT == 0

    result = []
    for idx, candidate in enumerate(inputs):
        if not isinstance(candidate, torch.Tensor):
            continue
        if candidate.device.type != "cuda":
            continue
        if idx in static_input_idxs and _offset_is_aligned(
            candidate.storage_offset(), candidate.dtype
        ):
            continue
        result.append(idx)
    return result
|
| 618 |
+
|
| 619 |
+
|
| 620 |
+
def align_inputs_from_check_idxs(
    model: Callable[[List[torch.Tensor]], Any], inputs_to_check: Sequence[int]
):
    """Wrap ``model`` so the inputs at ``inputs_to_check`` are re-aligned
    (copied if misaligned) before every call.

    Returns ``model`` unchanged when there is nothing to check.
    """
    if not inputs_to_check:
        return model

    def aligned_runner(new_inputs):
        copy_misaligned_inputs(new_inputs, inputs_to_check)
        return model(new_inputs)

    return aligned_runner
|
| 631 |
+
|
| 632 |
+
|
| 633 |
+
def align_inputs(
    model: Callable[[List[torch.Tensor]], Any],
    inputs: List[torch.Tensor],
    static_input_idxs: Sequence[int] = (),
):
    """Wrap ``model`` so non-static CUDA inputs that arrive misaligned are
    copied to aligned storage before each call."""
    return align_inputs_from_check_idxs(
        model, get_input_idxs_to_check(inputs, static_input_idxs)
    )
|
| 640 |
+
|
| 641 |
+
|
| 642 |
+
@dynamo_utils.dynamo_timed
def cudagraphify(
    model: torch.fx.GraphModule,
    inputs: List[torch.Tensor],
    static_input_idxs: Sequence[int] = (),
    *,
    device_index: int,
    stack_traces: List[Optional[str]],
    is_backward: bool,
    is_inference: bool,
):
    """Wrap ``model`` for CUDA-graph execution.

    Dispatches to the cudagraph-trees implementation when
    ``config.triton.cudagraph_trees`` is set, otherwise to the legacy
    ``cudagraphify_impl``.  If ``inputs`` contain fake tensors, capture is
    deferred until the first call with real inputs.
    """
    from torch._inductor.cudagraph_trees import (
        cudagraphify_impl as new_cudagraphify_impl,
    )

    cudagraphify_fn: Callable[..., Any]
    if config.triton.cudagraph_trees:
        cudagraphify_fn = functools.partial(
            new_cudagraphify_impl,
            device_index=device_index,
            stack_traces=stack_traces,
            is_backward=is_backward,
            is_inference=is_inference,
        )
    else:
        cudagraphify_fn = cudagraphify_impl

    # if using fake tensors, defer cudagraphs until we get real inputs at runtime
    if not any(isinstance(inp, FakeTensor) for inp in inputs):
        return cudagraphify_fn(model, inputs, static_input_idxs)

    # Lazily-captured path: first real call performs the capture, later calls
    # reuse the recorded graph.
    compiled_fn = None

    def run(new_inputs):
        nonlocal compiled_fn
        if compiled_fn is None:
            # Graph capture mutates RNG state during warmup; restore it so
            # deferred capture is observationally identical to eager capture.
            with dynamo_utils.preserve_rng_state():
                compiled_fn = cudagraphify_fn(model, new_inputs, static_input_idxs)
        return compiled_fn(new_inputs)

    return run
|
| 683 |
+
|
| 684 |
+
|
| 685 |
+
def remove_unaligned_input_idxs(
    inputs: Union[List[torch.Tensor], Sequence[int]],
    static_input_idxs: Sequence[int],
):
    """Filter ``static_input_idxs``, keeping only indices whose tensor input
    is ALIGNMENT-aligned.

    Inputs dropped here are no longer treated as static, so the caller will
    copy them into aligned buffers instead of relying on a stable address.
    Returns ``static_input_idxs`` unchanged (same object) when every entry is
    already aligned.
    """
    aligned_static_input_idxs = []
    # BUGFIX: the previous implementation used zip(static_input_idxs, inputs),
    # which paired static_input_idxs[i] with inputs[i] — checking the wrong
    # tensor whenever static_input_idxs is not a prefix range.  Index the
    # inputs list by the static index itself.
    for idx in static_input_idxs:
        candidate = inputs[idx]
        if isinstance(candidate, torch.Tensor) and (candidate.data_ptr() % ALIGNMENT) == 0:
            aligned_static_input_idxs.append(idx)
    if len(aligned_static_input_idxs) != len(static_input_idxs):
        return aligned_static_input_idxs
    return static_input_idxs
|
| 700 |
+
|
| 701 |
+
|
| 702 |
+
def static_input(x: torch.Tensor):
    """Allocate a fresh, uninitialized tensor with exactly ``x``'s size,
    stride, dtype, and device (contents are NOT copied)."""
    # TODO(jansel): figure out why this version doesn't work:
    # return torch.empty_strided(x.size(), x.stride(), dtype=x.dtype, device=x.device)
    last_offset = sum((dim - 1) * step for dim, step in zip(x.size(), x.stride()))
    backing = torch.empty(last_offset + 1, dtype=x.dtype, device=x.device)
    return torch.as_strided(backing, x.size(), x.stride())
|
| 713 |
+
|
| 714 |
+
|
| 715 |
+
def index_expanded_dims_and_copy_(
    dst: torch.Tensor,
    src: torch.Tensor,
    expanded_dims: List[int],
):
    """Index into expanded dimensions of both ``dst`` and ``src``, then
    ``copy_`` src's data into dst in place."""
    index_expanded_dims(dst, expanded_dims).copy_(
        index_expanded_dims(src, expanded_dims)
    )
|
| 724 |
+
|
| 725 |
+
|
| 726 |
+
def cudagraphify_impl(
    model: torch.fx.GraphModule,
    inputs: List[torch.Tensor],
    static_input_idxs: Sequence[int] = (),
):
    """
    Capture ``model`` into a ``torch.cuda.CUDAGraph`` and return a runner
    that copies fresh inputs into the captured buffers and replays the graph.

    Assumes inputs[static_input_idxs[i]] are always the same memory address.
    """
    check_input_idxs = get_input_idxs_to_check(inputs, static_input_idxs)
    # Static inputs that arrive misaligned can't be trusted as stable; demote them.
    static_input_idxs = remove_unaligned_input_idxs(inputs, static_input_idxs)
    copy_misaligned_inputs(inputs, check_input_idxs)

    assert isinstance(inputs, list)

    # For each non-static input, remember which dims are expanded (so copies
    # can index past the broadcasting); static inputs need no copy.
    inps_expanded_dims = [
        get_expanded_dims(x) if idx not in static_input_idxs else []
        for idx, x in enumerate(inputs)
    ]

    # allocate static tensor inputs
    static_inputs = [
        x
        if not isinstance(x, torch.Tensor)
        else static_input(x)
        if idx not in static_input_idxs
        else x.detach()
        for idx, x in enumerate(inputs)
    ]

    # copy over input values for fresh allocations
    for idx, (x, expanded_dims) in enumerate(zip(inputs, inps_expanded_dims)):
        if isinstance(x, torch.Tensor) and idx not in static_input_idxs:
            index_expanded_dims_and_copy_(static_inputs[idx], x, expanded_dims)

    # warmup: run once on a side stream so lazy kernel/module initialization
    # happens outside of graph capture.
    torch.cuda.synchronize()
    stream = torch.cuda.Stream()
    stream.wait_stream(torch.cuda.current_stream())
    # copy static_inputs because it will be cleared in model
    with torch.cuda.stream(stream):
        model(list(static_inputs))
    stream.synchronize()
    torch.cuda.current_stream().wait_stream(stream)
    torch.cuda.synchronize()

    # record
    graph = torch.cuda.CUDAGraph()
    with torch.cuda.graph(graph, stream=stream, capture_error_mode="thread_local"):
        static_outputs = model(list(static_inputs))
    if not isinstance(static_outputs, (list, tuple)):
        static_outputs = (static_outputs,)

    if config.size_asserts:

        def run(new_inputs):
            # Debug-checked runner: verifies static inputs kept their address.
            assert len(static_inputs) == len(new_inputs)
            for idx, (dst, src, expanded_dims) in enumerate(
                zip(static_inputs, new_inputs, inps_expanded_dims)
            ):
                if not isinstance(dst, torch.Tensor):
                    pass
                elif idx in static_input_idxs:
                    assert dst.data_ptr() == src.data_ptr()
                else:
                    # TODO - could make one single op of multiple slices
                    # and avoid dispatch.
                    # Could also pre-index the `dst` tensors
                    index_expanded_dims_and_copy_(dst, src, expanded_dims)
            # Release caller references before replay (boxed calling convention).
            new_inputs.clear()
            graph.replay()
            return static_outputs

    else:
        # Fast runner: precompute which indices actually need copying.
        copy_indices = [
            idx for idx in range(len(static_inputs)) if idx not in static_input_idxs
        ]

        def run(new_inputs):
            for idx in copy_indices:
                expanded_dims = inps_expanded_dims[idx]
                index_expanded_dims_and_copy_(
                    static_inputs[idx], new_inputs[idx], expanded_dims
                )
            new_inputs.clear()
            graph.replay()
            return static_outputs

    return align_inputs_from_check_idxs(run, check_input_idxs)
|
| 814 |
+
|
| 815 |
+
|
| 816 |
+
def count_tangents(fx_g: torch.fx.GraphModule):
    """Infer how many leading placeholder inputs of a backwards graph are
    static (saved activations/params rather than tangents or RNG offsets).

    Asserts that the static placeholders form a contiguous prefix, then
    returns their count.
    """
    nonstatic_markers = ("tangents", "bwd_seed", "bwd_base_offset")

    def _is_static(node):
        return not any(marker in node.name for marker in nonstatic_markers)

    static_arg_idxs = []
    placeholder_count = 0
    for node in fx_g.graph.nodes:
        if node.op != "placeholder":
            continue
        if _is_static(node):
            static_arg_idxs.append(placeholder_count)
        placeholder_count += 1

    # Static args must be a contiguous prefix of the placeholder list.
    assert static_arg_idxs == list(range(len(static_arg_idxs)))
    return len(static_arg_idxs)
|
| 838 |
+
|
| 839 |
+
|
| 840 |
+
# Mutable flag set to True (via mock.patch.object) for the duration of a
# compile_fx_aot call, so downstream compilers (e.g. fw_compiler_freezing)
# can detect that they are running in AOT mode.
_in_aot_compilation = BoxedBool(False)
|
| 841 |
+
|
| 842 |
+
|
| 843 |
+
def compile_fx_aot(
    model_: torch.fx.GraphModule,
    example_inputs_: List[torch.Tensor],
    inner_compile: Callable[..., Any] = compile_fx_inner,
    config_patches: Optional[Dict[str, Any]] = None,
):
    """Ahead-of-time entry point: compile ``model_`` through ``compile_fx``
    with the C++ wrapper forced on and ``aot_mode=True``.

    When no output path is configured, one is derived from a hash of the
    model's generated code.  ``_in_aot_compilation`` is set for the duration
    of the call.
    """
    # Build a fresh patch dict (never mutate the caller's) with cpp_wrapper on.
    config_patches = dict(config_patches) if config_patches is not None else {}
    config_patches["cpp_wrapper"] = True

    if (
        "aot_inductor_output_path" not in config_patches
        and not config.aot_inductor_output_path
    ):
        config_patches["aot_inductor_output_path"] = code_hash(model_.code)

    with mock.patch.object(_in_aot_compilation, "value", True):
        return compile_fx(
            model_,
            example_inputs_,
            inner_compile=functools.partial(inner_compile, aot_mode=True),
            config_patches=config_patches,
        )
|
| 870 |
+
|
| 871 |
+
|
| 872 |
+
# Monotonic source of unique graph ids (one consumed per compile_fx invocation).
_graph_counter = itertools.count(0)
|
| 873 |
+
|
| 874 |
+
|
| 875 |
+
def fw_compiler_freezing(
    aot_autograd_model: torch.fx.GraphModule,
    aot_example_inputs: List[torch.Tensor],
    dynamo_model: torch.fx.GraphModule,
    num_example_inputs: int,
    inner_compile: Callable[..., Any],
    cudagraphs: BoxedBool,
    graph_id: int,
    forward_device: BoxedDeviceIndex,
):
    """Forward compiler used when freezing is enabled.

    Freezes parameters of ``dynamo_model`` into constants of the aot-traced
    model, compiles the frozen graph with ``inner_compile`` in inference mode,
    and returns a boxed callable that drops the constant-ified params from the
    argument list (unless running under AOT compilation).

    Side effect: entries of ``tracing_context.params_flat`` that were frozen
    into the graph are set to ``None`` to release them.
    """
    from torch._inductor.freezing import convert_conv_weights_to_channels_last, freeze

    # partition_fn won't be called
    joint_graph_passes(aot_autograd_model)

    layout_opt = GraphLowering.decide_layout_opt(aot_autograd_model)
    if layout_opt:
        # make sure meta['val'] is properly setup
        fake_tensor_prop(aot_autograd_model, aot_example_inputs, True)
        convert_conv_weights_to_channels_last(aot_autograd_model)

    opt_model, preserved_arg_indices = freeze(
        dynamo_model,
        aot_autograd_model,
        aot_example_inputs,  # type: ignore[arg-type]
    )

    # Keep only the inputs that survived freezing; the rest became constants.
    aot_example_inputs = [aot_example_inputs[ind] for ind in preserved_arg_indices]
    num_fixed = len(preserved_arg_indices) - num_example_inputs

    fake_mode = detect_fake_mode(aot_example_inputs)

    # for freezing, all graph outputs should be user visible
    *_, model_outputs_node = opt_model.graph.nodes
    model_outputs = model_outputs_node.args[0]
    user_visible_outputs = [
        n.name for n in model_outputs if isinstance(n, torch.fx.Node)
    ]

    # constant params will be real tensors, not fake
    tracing_context = torch._guards.TracingContext.get()
    assert tracing_context is not None
    params_flat = tracing_context.params_flat
    assert params_flat is not None
    # Drop references to params that were folded into the graph as constants.
    for i in range(len(params_flat)):
        if i not in preserved_arg_indices:
            params_flat[i] = None

    with mock.patch.object(fake_mode, "allow_non_fake_inputs", True):
        optimized_function = inner_compile(
            opt_model,
            aot_example_inputs,
            num_fixed=num_fixed,
            cudagraphs=cudagraphs,
            graph_id=graph_id,
            is_inference=True,
            boxed_forward_device_index=forward_device,
            layout_opt=layout_opt,
            user_visible_outputs=user_visible_outputs,
        )

    # aot_inductor codegens a call that takes in just the inputs, so we don't return a wrapper
    # that drops constant-ified params
    if _in_aot_compilation:
        return optimized_function

    def wrapper(args):
        # Boxed convention: select surviving args, clear the caller's list.
        args_new = [args[i] for i in preserved_arg_indices]
        args.clear()
        return optimized_function(args_new)

    wrapper._boxed_call = True  # type: ignore[attr-defined]

    return wrapper
|
| 949 |
+
|
| 950 |
+
|
| 951 |
+
def compile_fx(
    model_: torch.fx.GraphModule,
    example_inputs_: List[torch.Tensor],
    inner_compile: Callable[..., Any] = compile_fx_inner,
    config_patches: Optional[Dict[str, Any]] = None,
    decompositions: Optional[Dict[OpOverload, Callable[..., Any]]] = None,
):
    """Main entrypoint to a compile given FX graph.

    First normalizes the call by recursively re-entering itself for special
    cases (config patches, cpp-wrapper mode, graphs that return non-tuples,
    dynamo.export graphs, nested list/tuple/dict inputs), then builds the
    forward/backward/inference compilers and hands the graph to AOTAutograd
    under a fake-tensor tracing context.
    """
    if config_patches:
        with config.patch(config_patches):  # type: ignore[attr-defined]
            return compile_fx(
                model_,
                example_inputs_,
                # need extra layer of patching as backwards is compiled out of scope
                inner_compile=config.patch(config_patches)(inner_compile),  # type: ignore[attr-defined]
                decompositions=decompositions,
            )

    if config.cpp_wrapper:
        # Compile once with the python wrapper (cpp_wrapper=False) so that
        # inner_compile_with_cpp_wrapper can do its two-pass compilation.
        with config.patch(  # type: ignore[attr-defined]
            {
                "cpp_wrapper": False,
                "triton.autotune_cublasLt": False,
                "triton.cudagraphs": False,
                # CudaWrapperCodeGen relies on kernel name to find the autotuned cubin file
                "triton.unique_kernel_names": True,
            }
        ), V.set_real_inputs(
            example_inputs_
        ):  # type: ignore[call-arg]
            return compile_fx(
                model_,
                example_inputs_,
                inner_compile=inner_compile_with_cpp_wrapper(inner_compile),
                decompositions=decompositions,
            )

    # Re-entry point that preserves the current compiler/decomposition choices.
    recursive_compile_fx = functools.partial(
        compile_fx,
        inner_compile=inner_compile,
        decompositions=decompositions,
    )

    if not graph_returns_tuple(model_):
        return make_graph_return_tuple(
            model_,
            example_inputs_,
            recursive_compile_fx,
        )

    if isinstance(model_, torch.fx.GraphModule):
        if isinstance(model_.graph._codegen, _PyTreeCodeGen):
            # this graph is the result of dynamo.export()
            return handle_dynamo_export_graph(
                model_,
                example_inputs_,
                recursive_compile_fx,
            )

        # Since handle_dynamo_export_graph will trigger compile_fx again,
        # Move these passes after handle_dynamo_export_graph to avoid repeated calls.
        model_ = pre_grad_passes(model_, example_inputs_)

    if any(isinstance(x, (list, tuple, dict)) for x in example_inputs_):
        return flatten_graph_inputs(
            model_,
            example_inputs_,
            recursive_compile_fx,
        )

    assert not config._raise_error_for_testing
    num_example_inputs = len(example_inputs_)
    # Boxed so the nested compilers below can flip/read these after the fact.
    cudagraphs = BoxedBool(config.triton.cudagraphs)
    forward_device = BoxedDeviceIndex(None)

    graph_id = next(_graph_counter)

    decompositions = (
        decompositions if decompositions is not None else select_decomp_table()
    )

    @dynamo_utils.dynamo_timed
    def fw_compiler_base(
        model: torch.fx.GraphModule,
        example_inputs: List[torch.Tensor],
        is_inference: bool,
    ):
        # Forward compiler used for both training forward and inference graphs.
        if is_inference:
            # partition_fn won't be called
            joint_graph_passes(model)

        num_rng_seed_offset_inputs = 2 if functorch_config.functionalize_rng_ops else 0
        # "fixed" inputs are the extra args AOTAutograd prepended (params, rng state).
        fixed = len(example_inputs) - num_example_inputs - num_rng_seed_offset_inputs
        user_visible_outputs = set()

        if config.keep_output_stride:
            *_, model_outputs_node = model.graph.nodes
            assert model_outputs_node.op == "output"
            model_outputs, _ = pytree.tree_flatten(model_outputs_node.args)
            num_model_outputs = len(model_outputs)

            context = torch._guards.TracingContext.get()
            if context is not None and context.fw_metadata:
                original_output_start_index = context.fw_metadata.num_mutated_inputs
            else:
                original_output_start_index = 0

            if isinstance(model_, torch.fx.GraphModule):
                *_, orig_model_outputs_node = model_.graph.nodes
                assert orig_model_outputs_node.op == "output"
                orig_model_outputs, _ = pytree.tree_flatten(
                    orig_model_outputs_node.args
                )
                num_orig_model_outputs = len(orig_model_outputs)
            else:
                num_orig_model_outputs = num_model_outputs

            assert num_orig_model_outputs <= num_model_outputs

            # We makes the following assumption
            # For inference
            #   len(orig_model_outputs) == len(model_outputs)
            # For training
            #   len(orig_model_outputs) <= len(model_outputs)
            # During training, most of the time the model_outputs starts with
            # orignal module's outputs followed by saved activations.
            # But this can be not true if the model have inplace updated tensors.
            # AOTAutograd will make those tensors being returned before the orignal
            # module's output.
            # To make things safe, we'll use original_output_start_index field
            # set by AOTAutograd to decide where the original module outputs start.

            user_visible_outputs = {
                n.name
                for n in model_outputs[
                    original_output_start_index : original_output_start_index
                    + num_orig_model_outputs
                ]
                if isinstance(n, torch.fx.Node)
            }

        return inner_compile(
            model,
            example_inputs,
            num_fixed=fixed,
            cudagraphs=cudagraphs,
            graph_id=graph_id,
            is_inference=is_inference,
            boxed_forward_device_index=forward_device,
            user_visible_outputs=user_visible_outputs,
        )

    fw_compiler = functools.partial(fw_compiler_base, is_inference=False)

    if config.freezing and not torch.is_grad_enabled():
        # Freezing path: constant-fold parameters into the graph at compile time.
        inference_compiler = functools.partial(
            fw_compiler_freezing,
            dynamo_model=model_,
            num_example_inputs=num_example_inputs,
            inner_compile=inner_compile,
            cudagraphs=cudagraphs,
            graph_id=graph_id,
            forward_device=forward_device,
        )
    else:
        inference_compiler = functools.partial(fw_compiler_base, is_inference=True)

    def partition_fn(graph, joint_inputs, **kwargs):
        # Run joint-graph optimizations before splitting fw/bw.
        joint_graph_passes(graph)
        return min_cut_rematerialization_partition(
            graph, joint_inputs, **kwargs, compiler="inductor"
        )

    @dynamo_utils.dynamo_timed
    def bw_compiler(model: torch.fx.GraphModule, example_inputs: List[torch.Tensor]):
        # Backward compiler: tangents are the fixed (non-varying) inputs here.
        fixed = count_tangents(model)
        return inner_compile(
            model,
            example_inputs,
            num_fixed=fixed,
            cudagraphs=cudagraphs,
            is_backward=True,
            graph_id=graph_id,
            boxed_forward_device_index=forward_device,
        )

    # TODO: can add logging before/after the call to create_aot_dispatcher_function
    # in torch._functorch/aot_autograd.py::aot_module_simplified::aot_function_simplified::new_func
    # once torchdynamo is merged into pytorch
    fake_mode = detect_fake_mode(example_inputs_) or torch._subclasses.FakeTensorMode(
        allow_non_fake_inputs=True
    )
    tracing_context = (
        torch._guards.TracingContext.get() or torch._guards.TracingContext(fake_mode)
    )

    with V.set_fake_mode(fake_mode), torch._guards.tracing(  # type: ignore[call-arg]
        tracing_context
    ), compiled_autograd.disable():
        return aot_autograd(
            fw_compiler=fw_compiler,
            bw_compiler=bw_compiler,
            inference_compiler=inference_compiler,
            decompositions=decompositions,
            partition_fn=partition_fn,
            keep_inference_input_mutations=True,
        )(model_, example_inputs_)
|
| 1158 |
+
|
| 1159 |
+
|
| 1160 |
+
# pass config dict back to user
|
| 1161 |
+
def get_patched_config_dict(config_patches=None):
    """Return a copy of the inductor config dict with *config_patches* applied."""
    patched = config.patch(config_patches)  # type: ignore[attr-defined]
    with patched:
        return config.get_config_copy()  # type: ignore[attr-defined]
|
| 1164 |
+
|
| 1165 |
+
|
| 1166 |
+
def _shape_env_from_inputs(inputs: List[torch.Tensor]):
|
| 1167 |
+
shape_env = None
|
| 1168 |
+
fake_mode = detect_fake_mode(inputs)
|
| 1169 |
+
|
| 1170 |
+
# TODO(voz): It would be nice to enable this assert, but there are lots of tests that
|
| 1171 |
+
# pass in real inputs for now.
|
| 1172 |
+
# if len(inputs) > 0:
|
| 1173 |
+
# assert fake_mode is not None, breakpoint()
|
| 1174 |
+
|
| 1175 |
+
if fake_mode is not None:
|
| 1176 |
+
return fake_mode.shape_env
|
| 1177 |
+
|
| 1178 |
+
# When there are no tensor inputs, get shape_env from the first SymInt.
|
| 1179 |
+
for input in inputs:
|
| 1180 |
+
if isinstance(input, torch.SymInt):
|
| 1181 |
+
return input.node.shape_env
|
| 1182 |
+
|
| 1183 |
+
# TODO(voz): Should we always have one anyway?
|
| 1184 |
+
return None
|
| 1185 |
+
|
| 1186 |
+
|
| 1187 |
+
def output_node(gm: torch.fx.GraphModule):
    """Return the output node of an FX graph.

    FX graphs always terminate in a node whose ``op`` is ``"output"``; this
    asserts that invariant and hands the node back.
    """
    *_, final = gm.graph.nodes
    assert final.op == "output"
    return final
|
| 1192 |
+
|
| 1193 |
+
|
| 1194 |
+
def graph_returns_tuple(gm: torch.fx.GraphModule):
    """True if a FX graph returns a tuple"""
    if not isinstance(gm, torch.fx.GraphModule):
        # Not an FX graph at all; assume the tuple convention holds.
        return True
    (rv,) = output_node(gm).args
    if isinstance(rv, (list, tuple)):
        return True
    # A single node whose op schema declares multiple Tensor returns also
    # counts as tuple-like output.
    multi_tensor_output = (
        isinstance(rv, torch.fx.node.Node)
        and hasattr(rv.target, "_schema")
        and len(rv.target._schema.returns) > 1
        and all(str(ret.type) == "Tensor" for ret in rv.target._schema.returns)
    )
    return multi_tensor_output
|
| 1210 |
+
|
| 1211 |
+
|
| 1212 |
+
def make_graph_return_tuple(
    gm: torch.fx.GraphModule,
    inputs: List[torch.Tensor],
    compile_gm: Callable[..., Any],
):
    """
    Mutate gm so it returns a tuple.  This is only needed for graphs
    not created by torchdynamo that return non-tuples.
    """
    old_output = output_node(gm)
    (retval,) = old_output.args
    flat_retval, spec = pytree.tree_flatten(retval)
    # Splice in a new flat output node, then drop the original one.
    with gm.graph.inserting_before(old_output):
        gm.graph.output(flat_retval)
    gm.graph.erase_node(old_output)
    assert graph_returns_tuple(gm)

    compiled_fn = compile_gm(gm, inputs)

    @functools.wraps(compiled_fn)
    def wrapper(*args, **kwargs):
        # Rebuild the caller-visible structure from the flat outputs.
        return pytree.tree_unflatten(compiled_fn(*args, **kwargs), spec)

    return wrapper
|
| 1236 |
+
|
| 1237 |
+
|
| 1238 |
+
def flatten_graph_inputs(gm: torch.fx.GraphModule, inputs, compile_gm):
    """
    Flatten bumpy (nested list/tuple/dict) inputs and wrap gm so that it
    accepts the flat form.  This is only needed for graphs not created
    by torchdynamo that take bumpy inputs.
    """
    flat_inputs, in_spec = pytree.tree_flatten(inputs)

    class GmWrapper(torch.nn.Module):
        # Adapter module: accepts flat args and rebuilds the nested structure
        # before delegating to the wrapped graph module.
        def __init__(self):
            super().__init__()
            self.gm = gm

        def forward(self, *flat_args):
            nested = pytree.tree_unflatten(list(flat_args), in_spec)
            return self.gm(*nested)

    compiled_fn = compile_gm(GmWrapper(), flat_inputs)

    @functools.wraps(compiled_fn)
    def wrapper(*args):
        # note this doesn't check the spec, assuming it is the same
        flat_args, _ = pytree.tree_flatten(args)
        return compiled_fn(*flat_args)

    return wrapper
|
| 1263 |
+
|
| 1264 |
+
|
| 1265 |
+
def handle_dynamo_export_graph(
    gm: torch.fx.GraphModule,
    inputs: List[torch.Tensor],
    compile_gm: Callable[..., Any],
):
    """
    `torch._dynamo.export` embeds pytrees in the FX graph codegen object,
    convert that to a normal FX graph so inductor can compile it.
    """
    # Detach the pytree-aware codegen and regenerate with the plain one.
    pytree_codegen = gm.graph._codegen
    gm.graph._codegen = torch.fx.graph.CodeGen()
    gm.recompile()

    compiled_fn = compile_gm(gm, pytree_codegen.process_inputs(*inputs))

    @functools.wraps(compiled_fn)
    def wrapper(*args):
        # Flatten caller args the same way export did, then re-wrap outputs.
        flat_args = pytree_codegen.process_inputs(*args)
        return pytree_codegen.process_outputs(compiled_fn(*flat_args))

    return wrapper
|
llava_next/lib/python3.10/site-packages/torch/_inductor/config.py
ADDED
|
@@ -0,0 +1,449 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import sys
|
| 3 |
+
|
| 4 |
+
import torch
|
| 5 |
+
|
| 6 |
+
# add some debug printouts
|
| 7 |
+
debug = False
|
| 8 |
+
|
| 9 |
+
# Whether to disable a progress bar for autotuning
|
| 10 |
+
disable_progress = True
|
| 11 |
+
|
| 12 |
+
# Whether to enable printing the source code for each future
|
| 13 |
+
verbose_progress = False
|
| 14 |
+
|
| 15 |
+
# use cpp wrapper instead of python wrapper
|
| 16 |
+
cpp_wrapper = False
|
| 17 |
+
|
| 18 |
+
# dead code elimination
|
| 19 |
+
dce = False
|
| 20 |
+
|
| 21 |
+
# assume weight tensors are fixed size
|
| 22 |
+
static_weight_shapes = True
|
| 23 |
+
|
| 24 |
+
# put correctness assertions in generated code
|
| 25 |
+
size_asserts = os.environ.get("TORCHINDUCTOR_SIZE_ASSERTS", "1") == "1"
|
| 26 |
+
|
| 27 |
+
# enable loop reordering based on input orders
|
| 28 |
+
pick_loop_orders = True
|
| 29 |
+
|
| 30 |
+
# reuse a kernel input as the output
|
| 31 |
+
inplace_buffers = True
|
| 32 |
+
|
| 33 |
+
# reuse a buffer for an unrelated purpose
|
| 34 |
+
allow_buffer_reuse = True
|
| 35 |
+
|
| 36 |
+
# codegen benchmark harness
|
| 37 |
+
benchmark_harness = True
|
| 38 |
+
|
| 39 |
+
# fuse pointwise into templates
|
| 40 |
+
epilogue_fusion = True
|
| 41 |
+
|
| 42 |
+
# do epilogue fusions before other fusions
|
| 43 |
+
epilogue_fusion_first = False
|
| 44 |
+
|
| 45 |
+
# enable pattern match+replace optimizations
|
| 46 |
+
pattern_matcher = True
|
| 47 |
+
|
| 48 |
+
# Optimize away split cat patterns (Experimental)
|
| 49 |
+
split_cat_fx_passes = True
|
| 50 |
+
|
| 51 |
+
# enable pattern match with group fusion (using fbgemm)
|
| 52 |
+
group_fusion = False
|
| 53 |
+
|
| 54 |
+
# enable pattern match with batch fusion (using torch op)
|
| 55 |
+
batch_fusion = True
|
| 56 |
+
|
| 57 |
+
# enable reordering pass
|
| 58 |
+
reordering = True
|
| 59 |
+
|
| 60 |
+
# for pattern torch.mm(a, b.to(dtype)) with cuda tensors,
|
| 61 |
+
# enable torch._inductor.kernel.mm.tuned_mixed_mm fused kernel.
|
| 62 |
+
# Autotune will compare perf with normal cast->then->mm option
|
| 63 |
+
use_mixed_mm = False
|
| 64 |
+
|
| 65 |
+
# for pattern torch.mm(a, b.to(dtype)) with cuda tensors, always use
|
| 66 |
+
# torch._inductor.kernel.mm.tuned_mixed_mm's fused kernel.
|
| 67 |
+
# Autotune will not compare with normal cast->then->mm option.
|
| 68 |
+
# (if force_mixed_mm is true, the use_mixed_mm flag will be ignored)
|
| 69 |
+
force_mixed_mm = False
|
| 70 |
+
|
| 71 |
+
# AOTInductor output path
|
| 72 |
+
# If an absolute path is specified, the generated lib files will be stored under the directory;
|
| 73 |
+
# If a relative path is specified, it will be used as a subdirectory under the default caching path;
|
| 74 |
+
# If not specified, a temp directory will be created under the default caching path
|
| 75 |
+
aot_inductor_output_path = ""
|
| 76 |
+
|
| 77 |
+
# enable slow autotuning passes to select algorithms
|
| 78 |
+
max_autotune = os.environ.get("TORCHINDUCTOR_MAX_AUTOTUNE") == "1"
|
| 79 |
+
|
| 80 |
+
# enable slow autotuning passes to select pointwise/reductions algorithms
|
| 81 |
+
max_autotune_pointwise = os.environ.get("TORCHINDUCTOR_MAX_AUTOTUNE_POINTWISE") == "1"
|
| 82 |
+
|
| 83 |
+
# enable slow autotuning passes to select gemm algorithms
|
| 84 |
+
max_autotune_gemm = os.environ.get("TORCHINDUCTOR_MAX_AUTOTUNE_GEMM") == "1"
|
| 85 |
+
|
| 86 |
+
# Specify candidate backends for gemm autotune.
|
| 87 |
+
# Possible choices are combinations of: ATen, Triton.
|
| 88 |
+
# ATen: default Pytorch ATen kernels.
|
| 89 |
+
# Triton: Triton templates defined in torch inductor.
|
| 90 |
+
max_autotune_gemm_backends = os.environ.get(
|
| 91 |
+
"TORCHINDUCTOR_MAX_AUTOTUNE_GEMM_BACKENDS", "ATEN,TRITON"
|
| 92 |
+
).upper()
|
| 93 |
+
|
| 94 |
+
# enable searching global and local cache regardless of `max_autotune`
|
| 95 |
+
search_autotune_cache = os.environ.get("TORCHINDUCTOR_SEARCH_AUTOTUNE_CACHE") == "1"
|
| 96 |
+
|
| 97 |
+
save_args = os.environ.get("TORCHINDUCTOR_SAVE_ARGS") == "1"
|
| 98 |
+
|
| 99 |
+
# We will disable creating subprocess for autotuning if this is False
|
| 100 |
+
autotune_in_subproc = os.environ.get("TORCHINDUCTOR_AUTOTUNE_IN_SUBPROC") == "1"
|
| 101 |
+
|
| 102 |
+
coordinate_descent_tuning = (
|
| 103 |
+
os.environ.get("TORCHINDUCTOR_COORDINATE_DESCENT_TUNING") == "1"
|
| 104 |
+
)
|
| 105 |
+
coordinate_descent_check_all_directions = (
|
| 106 |
+
os.environ.get("TORCHINDUCTOR_COORDINATE_DESCENT_CHECK_ALL_DIRECTIONS") == "1"
|
| 107 |
+
)
|
| 108 |
+
coordinate_descent_search_radius = int(
|
| 109 |
+
os.environ.get("TORCHINDUCTOR_COORDINATE_DESCENT_RADIUS", "1")
|
| 110 |
+
)
|
| 111 |
+
|
| 112 |
+
layout_optimization = os.environ.get("TORCHINDUCTOR_LAYOUT_OPTIMIZATION", "1") == "1"
|
| 113 |
+
|
| 114 |
+
# Whether to keep the output strides the same as eager after layout optimization.
|
| 115 |
+
keep_output_stride = os.environ.get("TORCHINDUCTOR_KEEP_OUTPUT_STRIDE", "1") == "1"
|
| 116 |
+
|
| 117 |
+
# Enabling this will let compiler print warning messages if a generated triton
|
| 118 |
+
# kernel has inputs with mixed layouts. This is helpful for perf debugging
|
| 119 |
+
# since kernel with mixed layout inputs may run much slower then one whose inputs
|
| 120 |
+
# have uniform layouts.
|
| 121 |
+
warn_mix_layout = os.environ.get("TORCHINDUCTOR_WARN_MIX_LAYOUT") == "1"
|
| 122 |
+
|
| 123 |
+
# control store vs recompute heuristic
|
| 124 |
+
# For fanouts, rematerialization can lead to exponential blowup. So, have
|
| 125 |
+
# smaller threshold
|
| 126 |
+
realize_reads_threshold = 4
|
| 127 |
+
realize_bytes_threshold = 2000
|
| 128 |
+
|
| 129 |
+
# Threshold to prevent excessive accumulation of ops in one buffer during lowering
|
| 130 |
+
realize_acc_reads_threshold = 8
|
| 131 |
+
|
| 132 |
+
# fallback to eager for random/dropout, this is slow but useful for debugging
|
| 133 |
+
fallback_random = False
|
| 134 |
+
|
| 135 |
+
# automatically create fallbacks when encountering an unhandled op
|
| 136 |
+
implicit_fallbacks = True
|
| 137 |
+
|
| 138 |
+
# fuse even in cases without common reads
|
| 139 |
+
aggressive_fusion = False
|
| 140 |
+
|
| 141 |
+
# how many nodes to allow into a single fusion
|
| 142 |
+
max_fusion_size = 64
|
| 143 |
+
|
| 144 |
+
# replace small reductions with pointwise, disable with `= 1`
|
| 145 |
+
unroll_reductions_threshold = 8
|
| 146 |
+
|
| 147 |
+
# Add extra comments to output code (causes compile cache misses)
|
| 148 |
+
comment_origin = False
|
| 149 |
+
|
| 150 |
+
# Convert 1x1 convs into matmuls
|
| 151 |
+
conv_1x1_as_mm = False
|
| 152 |
+
|
| 153 |
+
# Enable split reductions for better utilization when the dimension
|
| 154 |
+
# being reduced over is large (by splitting it)
|
| 155 |
+
split_reductions = True
|
| 156 |
+
|
| 157 |
+
benchmark_kernel = os.environ.get("TORCHINDUCTOR_BENCHMARK_KERNEL", "0") == "1"
|
| 158 |
+
|
| 159 |
+
# Enable constant and index_expr folding
|
| 160 |
+
constant_and_index_propagation = True
|
| 161 |
+
|
| 162 |
+
# constant folding on the joint graph
|
| 163 |
+
joint_graph_constant_folding = True
|
| 164 |
+
|
| 165 |
+
# Enable indirect_indexing asserts for decompositions and lowerings
|
| 166 |
+
debug_index_asserts = False
|
| 167 |
+
|
| 168 |
+
|
| 169 |
+
def is_fbcode():
    """Detect a Meta-internal (fbcode) build of torch.

    OSS builds expose ``torch.version.git_version``; fbcode builds do not.
    """
    oss_build = hasattr(torch.version, "git_version")
    return not oss_build
|
| 171 |
+
|
| 172 |
+
|
| 173 |
+
# warnings intended for PyTorch developers, disable for point releases
|
| 174 |
+
is_nightly_or_source = "dev" in torch.__version__ or "git" in torch.__version__
|
| 175 |
+
developer_warnings = is_fbcode() or is_nightly_or_source
|
| 176 |
+
|
| 177 |
+
|
| 178 |
+
def decide_compile_threads():
    """
    Here are the precedence to decide compile_threads
    1. User can override it by TORCHINDUCTOR_COMPILE_THREADS. One may want to disable async compiling by
    setting this to 1 to make pdb happy.
    2. Set to 1 if it's win32 platform or it's a fbcode build
    3. decide by the number of CPU cores
    """
    env_override = os.environ.get("TORCHINDUCTOR_COMPILE_THREADS")
    if env_override is not None:
        return int(env_override)
    if sys.platform == "win32" or is_fbcode():
        return 1
    # Prefer the affinity mask when the OS exposes it (more accurate in
    # containers/cgroups than the raw core count).
    if hasattr(os, "sched_getaffinity"):
        cpu_count = len(os.sched_getaffinity(0))
    else:
        cpu_count = os.cpu_count()
    assert cpu_count
    return min(32, cpu_count)
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
compile_threads = decide_compile_threads()
|
| 201 |
+
|
| 202 |
+
# gemm autotuning global cache dir
|
| 203 |
+
if is_fbcode():
|
| 204 |
+
from libfb.py import parutil # type: ignore[import]
|
| 205 |
+
|
| 206 |
+
try:
|
| 207 |
+
if __package__:
|
| 208 |
+
global_cache_dir = parutil.get_dir_path(
|
| 209 |
+
os.path.join(__package__.replace(".", os.sep), "fb/cache")
|
| 210 |
+
)
|
| 211 |
+
else:
|
| 212 |
+
global_cache_dir = parutil.get_dir_path("fb/cache")
|
| 213 |
+
except ValueError:
|
| 214 |
+
global_cache_dir = None
|
| 215 |
+
else:
|
| 216 |
+
global_cache_dir = None
|
| 217 |
+
|
| 218 |
+
# If kernel is fused, the name is generated from the origin node op names
|
| 219 |
+
# for larger kernels limit this
|
| 220 |
+
kernel_name_max_ops = 10
|
| 221 |
+
|
| 222 |
+
# Pad input tensors of matmul/bmm/addmm to leverage Tensor Cores in NVIDIA GPUs
|
| 223 |
+
shape_padding = os.environ.get("TORCHINDUCTOR_SHAPE_PADDING", "1") == "1"
|
| 224 |
+
|
| 225 |
+
# Fx-based linear/matmul/bmm + permute/transpose vertical fusion
|
| 226 |
+
permute_fusion = os.environ.get("TORCHINDUCTOR_PERMUTE_FUSION", "0") == "1"
|
| 227 |
+
|
| 228 |
+
# Mark the wrapper call in PyTorch profiler
|
| 229 |
+
profiler_mark_wrapper_call = False
|
| 230 |
+
|
| 231 |
+
# Generate hook calls to torch._inductor.hooks.run_intermediate_hooks for
|
| 232 |
+
# every intermediate for which we can correlate it with an intermediate
|
| 233 |
+
# from the original FX graph
|
| 234 |
+
generate_intermediate_hooks = False
|
| 235 |
+
|
| 236 |
+
# Populate traceback field on IRNode; good for debugging why origin_node is
|
| 237 |
+
# not populated, or finding out where an IRNode was constructed
|
| 238 |
+
debug_ir_traceback = False
|
| 239 |
+
|
| 240 |
+
# used for debugging to make sure config is properly set
|
| 241 |
+
_raise_error_for_testing = False
|
| 242 |
+
|
| 243 |
+
_profile_var = os.environ.get("TORCHINDUCTOR_PROFILE", "")
|
| 244 |
+
profile_bandwidth = _profile_var != ""
|
| 245 |
+
profile_bandwidth_regex = "" if _profile_var == "1" else _profile_var
|
| 246 |
+
|
| 247 |
+
# TODO: remove later
|
| 248 |
+
disable_cpp_codegen = False
|
| 249 |
+
|
| 250 |
+
|
| 251 |
+
# Freezing will attempt to inline weights as constants in optimization
|
| 252 |
+
# and run constant folding and other optimizations on them. After freezing, weights
|
| 253 |
+
# can no longer be updated.
|
| 254 |
+
freezing: bool = os.environ.get("TORCHINDUCTOR_FREEZING", "0") == "1"
|
| 255 |
+
|
| 256 |
+
# Make freezing invalidate the eager Parameters of nn modules, to avoid memory overhead
|
| 257 |
+
# of potentially keeping multiple copies of weights.
|
| 258 |
+
freezing_discard_parameters: bool = False
|
| 259 |
+
|
| 260 |
+
|
| 261 |
+
# config specific to codegen/cpp.py
|
| 262 |
+
class cpp:
    """Config options specific to the C++ backend (codegen/cpp.py)."""

    # set to torch.get_num_threads()
    threads = -1

    # Do not generate loops when the condition doesn't hold, like:
    # for(long i0=4096; i0<4096; i0+=1)
    no_redundant_loops = True

    # Assume number of threads is dynamic, don't specialize thread number.
    # Kernels don't recompile on thread number changes with this flag on.
    # For single-threaded workload, turning it on would incur a slight
    # performance degradation.
    dynamic_threads = False

    # SIMD vector width; None means autodetect.
    simdlen = None
    # minimum number of elements per thread before parallelizing a loop
    min_chunk_size = 4096
    # candidate C++ compilers, tried in order (None -> conda gcc12 fallback)
    cxx = (
        None,  # download gcc12 from conda-forge if conda is installed
        # "g++-12",
        # "g++-11",
        # "g++-10",
        # "clang++",
        os.environ.get("CXX", "g++"),
        # "g++.par",
    )
    # Allow kernel performance profiling via PyTorch profiler
    enable_kernel_profile = False

    # enable weight prepacking to get a better performance; may lead to large memory footprint
    weight_prepack = True

    # Inject a bug into our relu implementation; useful for testing our repro
    # extraction and minification functionality.
    # Valid values: "compile_error", "runtime_error", "accuracy"
    inject_relu_bug_TESTING_ONLY = None
    inject_log1p_bug_TESTING_ONLY = None

    # If None, autodetect whether or not AVX512/AVX2 can be used.  Otherwise,
    # force usage as specified, without testing.
    vec_isa_ok = None

    # similar to config.triton.descriptive_names
    descriptive_names = "original_aten"

    # how many nodes to allow into a single horizontal fusion
    max_horizontal_fusion_size = 16
|
| 308 |
+
|
| 309 |
+
|
| 310 |
+
# config specific to codegen/triton.py
|
| 311 |
+
class triton:
    """Config options specific to the Triton backend (codegen/triton.py)."""

    # Use cudagraphs on output code
    cudagraphs = False

    # Use cudagraph trees for memory pooling if `cudagraphs` is True
    cudagraph_trees = not is_fbcode()

    # assertions not on the fast path, steady state
    slow_path_cudagraph_asserts = True

    # TODO - need to debug why this prevents cleanup
    cudagraph_trees_history_recording = False

    # assertions on the fast path
    fast_path_cudagraph_asserts = False

    # skip warmup for cudagraph trees
    skip_cudagraph_warmup = False

    # Synchronize before and after every compiled graph.
    debug_sync_graph = False

    # Synchronize after every kernel launch, to help pinpoint bugs
    debug_sync_kernel = False

    # Always load full blocks (rather than broadcasting inside the block)
    dense_indexing = False

    # limit tiling dimensions
    max_tiles = 2

    # use triton.autotune for pointwise ops with complex layouts
    # this should only be disabled for debugging/testing
    autotune_pointwise = True

    # max autotune gemm with cublasLt
    autotune_cublasLt = True

    # should we stop a fusion to allow better tiling?
    tiling_prevents_pointwise_fusion = True
    tiling_prevents_reduction_fusion = True

    # assert that indirect indexing does not read / write out of bounds
    assert_indirect_indexing = True

    # should we give different names to kernels
    # Note: This is orthogonal to descriptive_names - this is deciding whether
    # our triton kernel names should all be `triton_` (to maximize caching) or
    # whether they should be unique.
    unique_kernel_names = os.environ.get("TORCHINDUCTOR_UNIQUE_KERNEL_NAMES") == "1"

    # should we put op names in kernel names
    # False: No special names (just triton__1, triton__2, etc.)
    # "torch": Maps to the fx op in the Dynamo graph (module name, method name, etc.)
    # "original_aten": Maps to the highest-level aten op (i.e. pre-decompositions)
    # "inductor_node": Maps to the node name in the FX graph passed to Inductor
    descriptive_names = "original_aten"

    # use alternate codegen for smaller reductions
    persistent_reductions = (
        os.environ.get("TORCHINDUCTOR_PERSISTENT_REDUCTIONS", "1") == "1"
    )

    # hint to Triton when arguments are divisible by 16
    divisible_by_16 = True

    # theses are not enforced, but they are used by asserts in triton_heuristics.py
    # NOTE: mobilevit_s in timm_models required X to be set to the higher value 2048
    max_block = {"X": 2048, "Y": 1024, "Z": 1024}

    # Store the generated cubin files for cpp wrapper code to load
    store_cubin = False

    # the max number of spills we allow for the configs we benchmark.
    # Setting this to 0 means we skip a config if it spills even a single
    # register.
    # Settting it to a larger value allows a config spilling a small amount
    # of registers being benchmarked.
    #
    # NOTE: triton will always report >0 register spills for kernels using sin/cos.
    # (check this issue https://github.com/openai/triton/issues/1756 )
    # So far we see a fixed 8 spilled registers for kernels using sin/cos.
    # Raise the threshold to 16 to be safe.
    # We should revisit this once we understand more of the source of register spills.
    spill_threshold: int = 16

    # Inject a bug into our relu implementation; useful for testing our repro
    # extraction and minification functionality.
    # Valid values: "compile_error", "runtime_error", "accuracy"
    inject_relu_bug_TESTING_ONLY = None
|
| 401 |
+
|
| 402 |
+
|
| 403 |
+
# create a directory containing lots of debug information
|
| 404 |
+
class trace:
|
| 405 |
+
# master switch for all debugging flags below
|
| 406 |
+
enabled = os.environ.get("TORCH_COMPILE_DEBUG", "0") == "1"
|
| 407 |
+
|
| 408 |
+
# Save python logger call >=logging.DEBUG
|
| 409 |
+
debug_log = False
|
| 410 |
+
|
| 411 |
+
# Save python logger call >=logging.INFO
|
| 412 |
+
info_log = False
|
| 413 |
+
|
| 414 |
+
# Save input FX graph (post decomps, pre optimization)
|
| 415 |
+
fx_graph = True
|
| 416 |
+
|
| 417 |
+
# Save FX graph after transformations
|
| 418 |
+
fx_graph_transformed = True
|
| 419 |
+
|
| 420 |
+
# Save TorchInductor IR before fusion pass
|
| 421 |
+
ir_pre_fusion = True
|
| 422 |
+
|
| 423 |
+
# Save TorchInductor IR after fusion pass
|
| 424 |
+
ir_post_fusion = True
|
| 425 |
+
|
| 426 |
+
# Copy generated code to trace dir
|
| 427 |
+
output_code = True
|
| 428 |
+
|
| 429 |
+
# SVG figure showing post-fusion graph
|
| 430 |
+
graph_diagram = os.environ.get("INDUCTOR_POST_FUSION_SVG", "0") == "1"
|
| 431 |
+
|
| 432 |
+
# Store cProfile (see snakeviz to view)
|
| 433 |
+
compile_profile = False
|
| 434 |
+
|
| 435 |
+
# Upload the .tar.gz file
|
| 436 |
+
# Needs to be overriden based on specific environment needs
|
| 437 |
+
upload_tar = None
|
| 438 |
+
|
| 439 |
+
|
| 440 |
+
_save_config_ignore = {
|
| 441 |
+
# workaround: "Can't pickle <function ...>"
|
| 442 |
+
"trace.upload_tar",
|
| 443 |
+
}
|
| 444 |
+
|
| 445 |
+
|
| 446 |
+
from .._dynamo.config_utils import install_config_module
|
| 447 |
+
|
| 448 |
+
# adds patch, save_config, etc
|
| 449 |
+
install_config_module(sys.modules[__name__])
|
llava_next/lib/python3.10/site-packages/torch/_inductor/cudagraph_trees.py
ADDED
|
@@ -0,0 +1,2137 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
CUDA graph trees are a safety abstraction over CUDAGraphs, similar to make_graph_callables,
|
| 3 |
+
which share the same memory pool. Sharing a memory pool is an extremely
|
| 4 |
+
important optimization when chaining multiple CUDA graphs together, as it
|
| 5 |
+
prevents you from needing to copy intermediate tensors from one graph to the
|
| 6 |
+
next, and reduces overall memory usage by allowing dead memory from the first
|
| 7 |
+
pool to be reused in the second.
|
| 8 |
+
|
| 9 |
+
The standard graph/make_graph_callables support sharing memory pool, but
|
| 10 |
+
with a lot of caveats. CUDA graph trees remove these restrictions:
|
| 11 |
+
|
| 12 |
+
* Previously, if you recorded graphs A, B, you had to replay A, B in that
|
| 13 |
+
order. With CUDA graph trees, after replaying A, you can change your
|
| 14 |
+
mind and record/replay a different graph B'; we will support efficient
|
| 15 |
+
execution of both A, B and A, B', using only max(mem(A, B), mem(A, B')). In
|
| 16 |
+
other words: we support arbitrary trees of CUDA graph operations, not just
|
| 17 |
+
sequences (this is why this feature is called CUDA graph trees.)
|
| 18 |
+
|
| 19 |
+
* Previously, if you executed graph A, some non-CUDA graph code, and then
|
| 20 |
+
graph B, after executing graph B, it was not safe to retain any references
|
| 21 |
+
to intermediates produced by A. With CUDA graph trees, we track if any
|
| 22 |
+
outputs of graph A are still live by the time graph B is run, and make
|
| 23 |
+
sure graph B doesn't clobber there memory when reusing the CUDA graphs
|
| 24 |
+
pool. You'll get a separate recording of B depending on what tensors
|
| 25 |
+
stay live or dead.
|
| 26 |
+
|
| 27 |
+
CUDA graph trees are flexible enough to be used in Dynamo across graph breaks,
|
| 28 |
+
which is their primary use case.
|
| 29 |
+
|
| 30 |
+
The ability to switch from replay to record is fairly nontrivial: remember that
|
| 31 |
+
when you replay a CUDA graph, you only replay CUDA operations; no CPU side state
|
| 32 |
+
is updated. In particular, the CPU-side book-keeping for the allocator is not
|
| 33 |
+
reconstructed. However, to record a new child CUDA graph, we must restore this
|
| 34 |
+
book-keeping. This is what checkpoint pool state is used for.
|
| 35 |
+
"""
|
| 36 |
+
|
| 37 |
+
from __future__ import annotations
|
| 38 |
+
|
| 39 |
+
import contextlib
|
| 40 |
+
import dataclasses
|
| 41 |
+
import functools
|
| 42 |
+
import gc
|
| 43 |
+
import itertools
|
| 44 |
+
import logging
|
| 45 |
+
import operator
|
| 46 |
+
import sys
|
| 47 |
+
import threading
|
| 48 |
+
import traceback
|
| 49 |
+
import warnings
|
| 50 |
+
import weakref
|
| 51 |
+
from collections import defaultdict
|
| 52 |
+
|
| 53 |
+
from enum import auto, Enum
|
| 54 |
+
from typing import (
|
| 55 |
+
Any,
|
| 56 |
+
Callable,
|
| 57 |
+
Dict,
|
| 58 |
+
Iterator,
|
| 59 |
+
List,
|
| 60 |
+
Optional,
|
| 61 |
+
Sequence,
|
| 62 |
+
Set,
|
| 63 |
+
Tuple,
|
| 64 |
+
Union,
|
| 65 |
+
)
|
| 66 |
+
|
| 67 |
+
import torch.fx
|
| 68 |
+
from torch import Tensor
|
| 69 |
+
from torch._dynamo.mutation_guard import GenerationTracker
|
| 70 |
+
from torch._dynamo.utils import preserve_rng_state
|
| 71 |
+
from torch._inductor.compile_fx import (
|
| 72 |
+
align_inputs_from_check_idxs,
|
| 73 |
+
copy_misaligned_inputs,
|
| 74 |
+
get_expanded_dims,
|
| 75 |
+
get_input_idxs_to_check,
|
| 76 |
+
index_expanded_dims,
|
| 77 |
+
remove_unaligned_input_idxs,
|
| 78 |
+
static_input,
|
| 79 |
+
)
|
| 80 |
+
from torch.multiprocessing.reductions import StorageWeakRef
|
| 81 |
+
from torch.storage import UntypedStorage
|
| 82 |
+
from torch.types import _bool
|
| 83 |
+
from torch.utils import _pytree as pytree
|
| 84 |
+
from torch.utils.weak import TensorWeakRef
|
| 85 |
+
|
| 86 |
+
StorageWeakRefPointer = int
|
| 87 |
+
StorageDataPtr = int
|
| 88 |
+
NBytes = int
|
| 89 |
+
|
| 90 |
+
if torch.backends.cuda.is_built():
|
| 91 |
+
from torch._C import (
|
| 92 |
+
_cuda_CUDAAllocator_AllocatorState as AllocatorState,
|
| 93 |
+
_set_cached_tensors_enabled as _set_cached_tensors_enabled,
|
| 94 |
+
)
|
| 95 |
+
else:
|
| 96 |
+
|
| 97 |
+
class AllocatorState: # type: ignore[no-redef]
|
| 98 |
+
pass
|
| 99 |
+
|
| 100 |
+
def _set_cached_tensors_enabled(enabled: _bool) -> None:
|
| 101 |
+
pass
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
log = logging.getLogger(__name__)
|
| 105 |
+
|
| 106 |
+
from . import config
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
@dataclasses.dataclass(frozen=True)
|
| 110 |
+
class GraphID:
|
| 111 |
+
"Unique counter of a cuda graph recording"
|
| 112 |
+
id: int
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
@dataclasses.dataclass(frozen=True)
|
| 116 |
+
class FunctionID:
|
| 117 |
+
"Unique counter of a function wrapped in cudagraphify_impl"
|
| 118 |
+
id: int
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
@dataclasses.dataclass(frozen=True)
|
| 122 |
+
class WrappedFunction:
|
| 123 |
+
"""
|
| 124 |
+
Represents a function that you want to record for CUDA graph replay,
|
| 125 |
+
with a little more metadata so we can identify if we have an applicable
|
| 126 |
+
CUDA graph in our CUDA graph tree for it.
|
| 127 |
+
"""
|
| 128 |
+
|
| 129 |
+
model: Callable[..., Any]
|
| 130 |
+
static_input_idxs: Sequence[int]
|
| 131 |
+
id: FunctionID
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
def clear_cublass_cache():
|
| 135 |
+
"""
|
| 136 |
+
Cublas keeps a persistent workspace allocation for running matmuls. This poses a problem for
|
| 137 |
+
doing warmup within a CUDAGraph private pool because we do not want persistent allocations from
|
| 138 |
+
one one run to the next. When we begin a new run of a cudagraphs path (generation), all tensors
|
| 139 |
+
from the previous generation are freed. This frees them the the memory pool, but not elsewhere.
|
| 140 |
+
A tensor in the cublas workspace would continue to be in use the workspace but would also get allocated
|
| 141 |
+
in the next run. The memory would be in use in two places.
|
| 142 |
+
|
| 143 |
+
To solve this, we clear cublas caches before and after warming up or recording. If a workspace is required
|
| 144 |
+
it will be allocated to the cudagraph private pool and accounted for in the allocator for the duration of the
|
| 145 |
+
program. There is no overhead to this on replay since cudagraphs removes allocation overhead.
|
| 146 |
+
"""
|
| 147 |
+
torch._C._cuda_clearCublasWorkspaces()
|
| 148 |
+
|
| 149 |
+
|
| 150 |
+
@contextlib.contextmanager
|
| 151 |
+
def clear_cublas_manager():
|
| 152 |
+
"Context manager around clearing cublas caches that will clear on enter and exit"
|
| 153 |
+
clear_cublass_cache()
|
| 154 |
+
try:
|
| 155 |
+
yield
|
| 156 |
+
finally:
|
| 157 |
+
clear_cublass_cache()
|
| 158 |
+
|
| 159 |
+
|
| 160 |
+
@contextlib.contextmanager
|
| 161 |
+
def disable_conv_cache_emptying():
|
| 162 |
+
prev = torch._C._cuda_get_conv_benchmark_empty_cache()
|
| 163 |
+
torch._C._cudnn_set_conv_benchmark_empty_cache(False)
|
| 164 |
+
try:
|
| 165 |
+
yield
|
| 166 |
+
finally:
|
| 167 |
+
torch._C._cudnn_set_conv_benchmark_empty_cache(prev)
|
| 168 |
+
|
| 169 |
+
|
| 170 |
+
@contextlib.contextmanager
|
| 171 |
+
def enable_history_recording():
|
| 172 |
+
"Turns on history recording in the CUDA Caching Allocator"
|
| 173 |
+
enabled = torch._C._cuda_isHistoryEnabled()
|
| 174 |
+
try:
|
| 175 |
+
if not enabled:
|
| 176 |
+
torch.cuda.memory._record_memory_history()
|
| 177 |
+
yield
|
| 178 |
+
finally:
|
| 179 |
+
if not enabled:
|
| 180 |
+
torch.cuda.memory._record_memory_history(None)
|
| 181 |
+
|
| 182 |
+
|
| 183 |
+
def get_history_recording():
|
| 184 |
+
# TODO - remove, prevents cleanup
|
| 185 |
+
if not config.triton.cudagraph_trees_history_recording:
|
| 186 |
+
return contextlib.nullcontext()
|
| 187 |
+
return enable_history_recording()
|
| 188 |
+
|
| 189 |
+
|
| 190 |
+
class TreeManagerContainer:
|
| 191 |
+
"""
|
| 192 |
+
Manages the lifetime of the tree manager. Like `PrivatePool` in cuda caching allocator,
|
| 193 |
+
the tree and its corresponding memory pool should be kept alive as long as any outstanding
|
| 194 |
+
graph or tensor which is an output of a graph remains alive.
|
| 195 |
+
|
| 196 |
+
There is a single tree manager container per device.
|
| 197 |
+
|
| 198 |
+
The lifecycle of a tree_manager is:
|
| 199 |
+
- Is constructed, no graph, no fns, no tensors
|
| 200 |
+
- Tree manager is fetched, resulting in tree manager being allocated
|
| 201 |
+
- We generate a bunch of functions, calling add_strong_reference
|
| 202 |
+
- These functions die, calling finalize_reference
|
| 203 |
+
- When all the functions die, we finalize_tree_manager.
|
| 204 |
+
|
| 205 |
+
TODO: in the future, we would like to do the following once storage weak refs land
|
| 206 |
+
- We look for all the live storages and add references to THOSE
|
| 207 |
+
- We count as storages die
|
| 208 |
+
- All the storages are dead, we deallocate the tree manager
|
| 209 |
+
"""
|
| 210 |
+
|
| 211 |
+
def __init__(self, device_index):
|
| 212 |
+
# This class keeps a strong reference to tree_manager,
|
| 213 |
+
# but upon all other strong references to the tree_manager will reset it to None.
|
| 214 |
+
# We need a strong reference so that we can still access its attributes upon cleanup.
|
| 215 |
+
self.tree_manager: Optional[CUDAGraphTreeManager] = None
|
| 216 |
+
|
| 217 |
+
# Number of outstanding references to the current tree manager
|
| 218 |
+
self.live_cudagraphify_fns = 0
|
| 219 |
+
|
| 220 |
+
self.device_index = device_index
|
| 221 |
+
|
| 222 |
+
# Following two objects are only set in the case that Tensor outputs outlive
|
| 223 |
+
# the cudagraphify_fns. Reference to the Graph is needed to keep the private pool from
|
| 224 |
+
# deallocation.
|
| 225 |
+
self.live_storages_count = 0
|
| 226 |
+
self.graph: Optional[torch.cuda.CUDAGraph] = None
|
| 227 |
+
|
| 228 |
+
self.lock = threading.Lock()
|
| 229 |
+
|
| 230 |
+
def _finalize_tensor(self):
|
| 231 |
+
with self.lock:
|
| 232 |
+
self.live_storages_count -= 1
|
| 233 |
+
if self.live_storages_count == 0:
|
| 234 |
+
self.graph = None
|
| 235 |
+
|
| 236 |
+
# manager was used again after existing cleanup,
|
| 237 |
+
# we shouldnt set it to None
|
| 238 |
+
if self.live_cudagraphify_fns == 0:
|
| 239 |
+
self.tree_manager = None
|
| 240 |
+
|
| 241 |
+
def finalize_cudagraphify_fn(self):
|
| 242 |
+
with self.lock:
|
| 243 |
+
self.live_cudagraphify_fns -= 1
|
| 244 |
+
if self.live_cudagraphify_fns == 0:
|
| 245 |
+
self._finalize_tree_manager()
|
| 246 |
+
|
| 247 |
+
def _finalize_tree_manager(self):
|
| 248 |
+
assert self.lock.locked()
|
| 249 |
+
self.tree_manager = None
|
| 250 |
+
|
| 251 |
+
# TODO - when issue #91395 is landed, we can set a weakref on
|
| 252 |
+
# storages and trigger a deallocation when all outputs of the
|
| 253 |
+
# cudagraph are dead.
|
| 254 |
+
|
| 255 |
+
# live_storages = list(
|
| 256 |
+
# tree_manager.live_cudagraph_pool_storages_in_curr_execution()
|
| 257 |
+
# )
|
| 258 |
+
|
| 259 |
+
# # Maintain reference to graph to keep tensors alive
|
| 260 |
+
# assert len(tree_manager.roots) > 0, "expected at least one use"
|
| 261 |
+
# root = next(tree_manager.get_roots())
|
| 262 |
+
# self.graph = root.graph
|
| 263 |
+
# seen_storages = set()
|
| 264 |
+
# for stor in live_storages:
|
| 265 |
+
# if stor in seen_storages:
|
| 266 |
+
# continue
|
| 267 |
+
# seen_storages.add(stor)
|
| 268 |
+
# self.live_storages_count += 1
|
| 269 |
+
# . weakref.finalize(stor, self._finalize_tensor)
|
| 270 |
+
|
| 271 |
+
def add_strong_reference(self, fn: Callable[..., Any]):
|
| 272 |
+
with self.lock:
|
| 273 |
+
self.live_cudagraphify_fns += 1
|
| 274 |
+
|
| 275 |
+
weakref.finalize(fn, self.finalize_cudagraphify_fn)
|
| 276 |
+
|
| 277 |
+
def get_tree_manager(self) -> CUDAGraphTreeManager:
|
| 278 |
+
with self.lock:
|
| 279 |
+
if self.tree_manager is None:
|
| 280 |
+
self.tree_manager = CUDAGraphTreeManager(self.device_index)
|
| 281 |
+
return self.tree_manager
|
| 282 |
+
|
| 283 |
+
|
| 284 |
+
local = threading.local()
|
| 285 |
+
|
| 286 |
+
# one tree manager per device
|
| 287 |
+
local.tree_manager_containers = {}
|
| 288 |
+
local.tree_manager_locks = defaultdict(threading.Lock)
|
| 289 |
+
|
| 290 |
+
|
| 291 |
+
# only incremented by user call of mark_step_begin
|
| 292 |
+
class MarkStepBox:
|
| 293 |
+
mark_step_counter = 0
|
| 294 |
+
|
| 295 |
+
|
| 296 |
+
# We need to register this as an object that will be copied over as TLS when new
|
| 297 |
+
# threads are created in autograd
|
| 298 |
+
torch._C._stash_obj_in_tls("tree_manager_containers", local.tree_manager_containers)
|
| 299 |
+
torch._C._stash_obj_in_tls("tree_manager_locks", local.tree_manager_locks)
|
| 300 |
+
|
| 301 |
+
|
| 302 |
+
def mark_step_begin():
|
| 303 |
+
"Indicates that a new iteration of inference or training is about to begin."
|
| 304 |
+
|
| 305 |
+
# iterate down to distinguish from GenerationTracking counter
|
| 306 |
+
MarkStepBox.mark_step_counter -= 1
|
| 307 |
+
|
| 308 |
+
|
| 309 |
+
def reset_cudagraph_trees():
|
| 310 |
+
"Clear all cudagraph trees"
|
| 311 |
+
# see shutdown below for why this is necessary
|
| 312 |
+
container_dict = get_obj(local, "tree_manager_containers")
|
| 313 |
+
locks_dict = get_obj(local, "tree_manager_locks")
|
| 314 |
+
for device, lock in locks_dict.items():
|
| 315 |
+
with lock:
|
| 316 |
+
container = container_dict.get(device)
|
| 317 |
+
if not container or not container.tree_manager:
|
| 318 |
+
continue
|
| 319 |
+
|
| 320 |
+
container.tree_manager.shutdown()
|
| 321 |
+
|
| 322 |
+
_set_cached_tensors_enabled(False)
|
| 323 |
+
container_dict.clear()
|
| 324 |
+
|
| 325 |
+
MarkStepBox.mark_step_counter = 0
|
| 326 |
+
|
| 327 |
+
|
| 328 |
+
def get_obj(local, attr_name):
|
| 329 |
+
if hasattr(local, attr_name):
|
| 330 |
+
return getattr(local, attr_name)
|
| 331 |
+
else:
|
| 332 |
+
assert torch._C._is_key_in_tls(attr_name)
|
| 333 |
+
return torch._C._get_obj_in_tls(attr_name)
|
| 334 |
+
|
| 335 |
+
|
| 336 |
+
def get_container(device_index: int):
|
| 337 |
+
container_dict = get_obj(local, "tree_manager_containers")
|
| 338 |
+
lock = get_obj(local, "tree_manager_locks")[device_index]
|
| 339 |
+
|
| 340 |
+
with lock:
|
| 341 |
+
if device_index not in container_dict:
|
| 342 |
+
container_dict[device_index] = TreeManagerContainer(device_index)
|
| 343 |
+
|
| 344 |
+
return container_dict[device_index]
|
| 345 |
+
|
| 346 |
+
|
| 347 |
+
def get_manager(
|
| 348 |
+
device_index: int, create_if_none_exists=True
|
| 349 |
+
) -> Optional[CUDAGraphTreeManager]:
|
| 350 |
+
if create_if_none_exists:
|
| 351 |
+
return get_container(device_index).get_tree_manager()
|
| 352 |
+
return get_container(device_index).tree_manager
|
| 353 |
+
|
| 354 |
+
|
| 355 |
+
def cudagraphify_impl(model, inputs, static_input_idxs, *args, **kwargs):
|
| 356 |
+
fn_cache: Dict[Tuple[int, ...], Callable[..., Any]] = {}
|
| 357 |
+
|
| 358 |
+
# Detect int inputs: we need to index on these
|
| 359 |
+
int_key = [i for i, v in enumerate(inputs) if isinstance(v, int)]
|
| 360 |
+
get_ints: Any = operator.itemgetter(*int_key) if int_key else lambda _: None
|
| 361 |
+
|
| 362 |
+
del inputs
|
| 363 |
+
|
| 364 |
+
def deferred_cudagraphify(inputs):
|
| 365 |
+
int_key = get_ints(inputs)
|
| 366 |
+
fn = fn_cache.get(int_key)
|
| 367 |
+
if fn is not None:
|
| 368 |
+
return fn(inputs)
|
| 369 |
+
|
| 370 |
+
log.info("recording cudagraph tree for %s", int_key)
|
| 371 |
+
|
| 372 |
+
# first get indices we need to check to align, then update our static inputs,
|
| 373 |
+
# and finally copy
|
| 374 |
+
check_input_idxs = get_input_idxs_to_check(inputs, static_input_idxs)
|
| 375 |
+
new_static_input_idxs = remove_unaligned_input_idxs(inputs, static_input_idxs)
|
| 376 |
+
copy_misaligned_inputs(inputs, check_input_idxs)
|
| 377 |
+
|
| 378 |
+
fn, out = cudagraphify(model, inputs, new_static_input_idxs, *args, **kwargs)
|
| 379 |
+
fn = align_inputs_from_check_idxs(fn, inputs_to_check=check_input_idxs)
|
| 380 |
+
fn_cache[int_key] = fn
|
| 381 |
+
|
| 382 |
+
return out
|
| 383 |
+
|
| 384 |
+
return deferred_cudagraphify
|
| 385 |
+
|
| 386 |
+
|
| 387 |
+
def cudagraphify(
|
| 388 |
+
model,
|
| 389 |
+
inputs,
|
| 390 |
+
static_input_idxs=(),
|
| 391 |
+
*,
|
| 392 |
+
device_index: int,
|
| 393 |
+
is_backward: bool,
|
| 394 |
+
is_inference: bool,
|
| 395 |
+
stack_traces: Optional[StackTraces] = None,
|
| 396 |
+
):
|
| 397 |
+
manager = get_container(device_index).get_tree_manager()
|
| 398 |
+
assert not (is_backward and is_inference)
|
| 399 |
+
mode = (
|
| 400 |
+
CompilationMode.BACKWARD
|
| 401 |
+
if is_backward
|
| 402 |
+
else (CompilationMode.INFERENCE if is_inference else CompilationMode.FORWARD)
|
| 403 |
+
)
|
| 404 |
+
|
| 405 |
+
return manager.add_function(
|
| 406 |
+
model,
|
| 407 |
+
inputs,
|
| 408 |
+
static_input_idxs,
|
| 409 |
+
stack_traces,
|
| 410 |
+
mode,
|
| 411 |
+
)
|
| 412 |
+
|
| 413 |
+
|
| 414 |
+
class StorageWeakRefWrapper:
    """
    Wrapper around a storage weak ref. Will deallocate it upon expiration if invoked.
    """

    __slots__ = ["ref", "_data_ptr", "extra_ref_check"]

    # NOTE: fixed the stale `storage_ref` annotation — the attribute declared
    # in __slots__ and assigned in __init__ is `ref`.
    ref: StorageWeakRef

    def __init__(
        self,
        inp: Union[Tensor, UntypedStorage],
        # Fixed annotation: the callable's truth value is consumed in
        # expired(), so it is a predicate returning bool, not None.
        extra_ref_check: Optional[Callable[[], bool]] = None,
    ):
        """
        extra_ref_check is an additional check we need to run to check if the
        weak ref has expired. in checking storage use count we assume extra_ref_check
        will hold an additional reference to the storage.
        """
        if isinstance(inp, Tensor):
            stor = inp.untyped_storage()
        else:
            assert isinstance(inp, UntypedStorage)
            stor = inp
        self.ref = StorageWeakRef(stor)
        # cached so data_ptr() keeps working after the storage dies
        self._data_ptr = stor.data_ptr()
        self.extra_ref_check = extra_ref_check

    @classmethod
    def from_weakref_and_data_ptr(cls, cdata, data_ptr, extra_ref_check=None):
        """Alternate constructor from an existing weakref cdata and a known data ptr."""
        instance = cls.__new__(cls)
        instance._data_ptr = data_ptr
        instance.ref = StorageWeakRef.from_weakref(cdata)
        instance.extra_ref_check = extra_ref_check
        return instance

    def __call__(self) -> Optional[StorageWeakRefPointer]:
        """Return the underlying weakref cdata, or None if the storage expired."""
        if self.expired():
            return None

        return self.ref.cdata

    def swap_weakref(self, cdata):
        # Release the currently-held weak ref, then take ownership of `cdata`.
        self.ref.__del__()
        self.ref.cdata = cdata

    def data_ptr(self) -> int:
        "NB: returns the data ptr even if the storage has expired"
        return self._data_ptr

    def remove_extra_reference(self):
        # Drop the extra-reference predicate; expired() then uses the raw use count.
        self.extra_ref_check = None

    def expired(self):
        """True if the storage this weak ref points to has been deallocated."""
        if self.extra_ref_check is not None and not self.extra_ref_check():
            return False

        # if extra_ref_check is not None we expect an additional reference
        stor_count = torch._C._storage_Use_Count(self.ref.cdata)
        return (stor_count - (self.extra_ref_check is not None)) == 0

    def __repr__(self):
        if self.ref is None or self.ref.expired():
            return f"StorageWeakRefWrapper to {self.data_ptr()}; dead"
        else:
            return f"StorageWeakRefWrapper to {self.data_ptr()}; alive"
|
| 480 |
+
|
| 481 |
+
|
| 482 |
+
def is_live(weak_ref: Optional[StorageWeakRefWrapper]) -> bool:
    """True iff ``weak_ref`` wraps a storage that can still be dereferenced."""
    deref = maybe_deref(weak_ref)
    return deref is not None
|
| 484 |
+
|
| 485 |
+
|
| 486 |
+
def maybe_deref(
    weak_ref: Optional[StorageWeakRefWrapper],
) -> Optional[Tuple[UntypedStorage, int]]:
    """Dereference ``weak_ref``; returns (storage cdata, recorded data ptr),
    or None when the ref is absent or the storage has expired."""
    if weak_ref is None:
        return None
    stor = weak_ref()
    if stor is None:
        return None
    # NB: stor.data_ptr() does not necessarily equal weak_ref.data_ptr()
    return stor, weak_ref.data_ptr()
|
| 496 |
+
|
| 497 |
+
|
| 498 |
+
@contextlib.contextmanager
def _use_cuda_memory_pool_manager(device, mem_pool, stream):
    """
    Context manager to use cuda graph pool for new allocations. If you use this manager
    all cudagraph tensors in use should be reflected in the allocator or they will be overwritten.
    existing_graph should already have been used in a capture, and the mem_pool must already exist,
    because this manager will not preserve a reference to the pool which keeps it alive.
    """
    # Quiesce all streams before redirecting this stream's allocations
    # into the cuda graph pool.
    torch.cuda.synchronize()
    stream.wait_stream(torch.cuda.current_stream())

    with torch.cuda.stream(stream), torch.device(device):
        torch._C._cuda_beginAllocateCurrentStreamToPool(device, mem_pool)
        try:
            yield
        finally:
            # Always undo the redirection, even if the body raised.
            torch._C._cuda_endAllocateCurrentStreamToPool(device)
            torch._C._cuda_releasePool(device, mem_pool)
|
| 516 |
+
|
| 517 |
+
|
| 518 |
+
def map_to_ref(t: Optional[Tensor]) -> Optional[StorageWeakRefWrapper]:
    """Wrap a tensor's storage in a StorageWeakRefWrapper; None passes through."""
    if isinstance(t, torch.Tensor):
        return StorageWeakRefWrapper(t)
    assert t is None
    return None
|
| 523 |
+
|
| 524 |
+
|
| 525 |
+
# A path index of (depth, offset) indices into a graph that is `depth` number of nodes from the root
# at graph output offset
PathOutputIndex = Tuple[int, int]

# For each node in the path, for each output, is the output alive
PathLiveness = List[List[bool]]

# One stack trace (or None) recorded per graph output
StackTraces = List[Optional[str]]
|
| 533 |
+
|
| 534 |
+
|
| 535 |
+
class CUDAWarmupNode:
    """
    Simplified Wrapper around A CUDA Model that wraps outputs in storage refs and exposes
    apis to get the live storages in the current chain of warmup.

    A CUDAWarmupNode may have either CUDAGraphNode or CUDAWarmupNode as a parent, but may only have
    CUDAWarmupNode as children, because we cannot record or execute with tensors which do not have stable
    memory addresses.

    CUDAWarmupNode and CUDAGraphNode have a number of differences that make it easier to use separate classes.
    - Much of the CUDAGraphNode logic & initialization is based on the tensor properties of first recording. In the
    first instance of warmup, these are not finalized yet.
    - All Inputs to the RecordedFunction must be copied over to the cuda graph memory pool, this is unnecessary in warmup.
    - CUDAWarmup is only used once and so does not need to optimize as much bookkeeping. It is much simpler.

    NB: this class and CUDAGraphNode need to expose `path_live_weakrefs`, `all_outputs_are_dead`, and
    `self.outputs_weakrefs`, `stack_traces`, and `tensor_weakrefs` for compatibility.
    """

    def __init__(
        self,
        wrapped_function: WrappedFunction,
        parent,
        cuda_graphs_pool: Tuple[int, int],
        existing_cuda_graph: torch.cuda.Graph,
        device_index: int,
        stack_traces: Optional[StackTraces],
        stream: torch.cuda.Stream,
        already_warm: bool,
    ):
        self.wrapped_function = wrapped_function
        # parent may be a CUDAGraphNode or another CUDAWarmupNode (or None for a root)
        self.parent = parent
        self.cuda_graphs_pool = cuda_graphs_pool
        # weak refs to the storages of this node's outputs; populated in run()
        self.outputs_weakrefs: List[Optional[StorageWeakRefWrapper]] = []
        self.tensor_weakrefs: List[Optional[TensorWeakRef]] = []
        self.existing_cuda_graph = existing_cuda_graph
        self.has_run = False
        self.device_index = device_index
        self.stack_traces = stack_traces
        self.stream = stream
        # when True, skip the expensive memory-pool consistency asserts
        self.already_warm = already_warm

    def run(self, new_inputs):
        """Run the wrapped model once with allocations routed into the cuda
        graph pool, recording weak refs to the (cuda, pool-owned) outputs."""
        assert not self.has_run, "Wrapped function should never be run twice"
        # NOTE(review): nothing in this class ever sets has_run to True, so the
        # assert above cannot fire as written — confirm whether that is intentional.

        # See: output_is_alias_of_persistent_static_inputs below. We should only be returning freshly created
        # storages in path_live_weakrefs.
        existing_path_data_ptrs = {
            t.data_ptr() for t in self.path_live_weakrefs() if t()
        }
        # data ptrs of inputs whose storage did not come from this cudagraph path
        non_cudagraph_inps = set()
        for i in range(len(new_inputs)):
            if (
                isinstance(new_inputs[i], torch.Tensor)
                and new_inputs[i].untyped_storage().data_ptr()
                not in existing_path_data_ptrs
            ):
                non_cudagraph_inps.add(new_inputs[i].untyped_storage().data_ptr())

        if config.triton.slow_path_cudagraph_asserts and not self.already_warm:
            refs = list(self.path_live_weakrefs())
            check_memory_pool(self.device_index, self.cuda_graphs_pool, refs)

        with torch.cuda.device(
            self.device_index
        ), disable_conv_cache_emptying(), clear_cublas_manager(), _use_cuda_memory_pool_manager(
            self.device_index, self.cuda_graphs_pool, self.stream
        ), get_history_recording():
            out = self.wrapped_function.model(new_inputs)

        # sync up stream used in `_use_cuda_memory_pool_manager` - TODO - wait stream instead ?
        torch.cuda.synchronize()

        # the model is expected to consume (clear) its inputs as it runs
        assert len(new_inputs) == 0

        # sdpa returns cpu tensors when not recording cuda graph
        def add_ref(o):
            # only track cuda tensors with a real, pool-created storage
            return (
                o is not None
                and isinstance(o, torch.Tensor)
                and o.is_cuda
                and o.untyped_storage().data_ptr() not in non_cudagraph_inps
                and o.untyped_storage().data_ptr() != 0
            )

        self.outputs_weakrefs.extend(
            [map_to_ref(o) if add_ref(o) else None for o in out]
        )
        self.tensor_weakrefs.extend(
            [TensorWeakRef(o) if add_ref(o) else None for o in out]
        )

        if config.triton.slow_path_cudagraph_asserts and not self.already_warm:
            out_refs = self.path_live_weakrefs()
            new_storages = [
                t for t in out_refs if t.data_ptr() not in non_cudagraph_inps
            ]
            check_memory_pool(self.device_index, self.cuda_graphs_pool, new_storages)

        return out

    @property
    def _path_from_root(self):
        # Walk parent pointers up to the root, then yield root -> ... -> self.
        nodes = []
        node = self
        while node:
            nodes.append(node)
            node = node.parent

        yield from reversed(nodes)

    def path_live_weakrefs(self) -> Iterator[StorageWeakRefWrapper]:
        "Returns all live storages weakrefs that created by nodes in this path"
        for node in self._path_from_root:
            for output in node.outputs_weakrefs:
                if is_live(output):
                    yield output

    def all_outputs_are_dead(self):
        # True when no storage produced on the root-to-self path is still alive.
        return not list(self.path_live_weakrefs())
|
| 655 |
+
|
| 656 |
+
|
| 657 |
+
# Aliases for List that say what the indices denote
InputList = List  # input indexes
OutputList = List  # output indexes
LevelList = List  # levels (distance from root of tree)
|
| 661 |
+
|
| 662 |
+
|
| 663 |
+
class OutputAliasInfo:
    """Base marker type describing how a graph output's storage relates to other storages."""

    pass
|
| 665 |
+
|
| 666 |
+
|
| 667 |
+
class _UnaliasedStorage(OutputAliasInfo):
    "Singleton to mark that the graph output constructs a new alias or is None"
    pass


# Shared singleton instance; identity-compared (`is UnaliasedStorage`) elsewhere.
UnaliasedStorage = _UnaliasedStorage()
|
| 673 |
+
|
| 674 |
+
|
| 675 |
+
class AliasesPriorGraphOutput(OutputAliasInfo):
    "Marks that the graph output aliases an output of a prior graph"
    __slots__ = ["index"]

    # (depth, output offset) into the path of nodes from the root
    index: PathOutputIndex

    def __init__(self, index: PathOutputIndex):
        assert isinstance(index, tuple)
        self.index = index
|
| 684 |
+
|
| 685 |
+
|
| 686 |
+
class AliasesNewOutput(OutputAliasInfo):
    "Marks that the graph output aliases an index in the new, returned outputs"

    __slots__ = ["index"]

    # position within the same outputs list whose storage this output shares
    index: int

    def __init__(self, index):
        assert isinstance(index, int)
        self.index = index
|
| 696 |
+
|
| 697 |
+
|
| 698 |
+
class CUDAGraphNode:
|
| 699 |
+
"""
|
| 700 |
+
A single recording of a function into a CUDA Graph. Recordings of CUDA Graphs share a single memory pool
|
| 701 |
+
and are structured into a tree, where there is a single recording that can precede it (parent) and multiple
|
| 702 |
+
subsequent recordings that may follow (children). A node will have no parent if it is the first recording
|
| 703 |
+
in a tree; i.e., when it is first recorded, there are no live tensors from a previous recording which
|
| 704 |
+
would force a dependency.
|
| 705 |
+
|
| 706 |
+
On first recording, all of the live tensors in the current CUDA Graph Node path will be
|
| 707 |
+
reflected in the corresponding private pool. On subsequent executions, the caching allocator
|
| 708 |
+
is unaffected when the graph is replayed.
|
| 709 |
+
|
| 710 |
+
In order to support recording a subsequent cuda graph recording after execution of this graph,
|
| 711 |
+
we checkpoint the state of the memory pool so that it may later be resumed.
|
| 712 |
+
|
| 713 |
+
WrappedFunction should have already been warmed up prior to invocation.
|
| 714 |
+
|
| 715 |
+
See [setCheckpointPoolState] for further explanation, as well as
|
| 716 |
+
https://user-images.githubusercontent.com/13564/222815509-374f3400-f83d-4f7d-8fa6-4a092b3250bb.png
|
| 717 |
+
"""
|
| 718 |
+
|
| 719 |
+
def __init__(
    self,
    wrapped_function: WrappedFunction,
    id: GraphID,  # NOTE: shadows builtin `id`; kept for caller compatibility
    parent: Optional[CUDAGraphNode],
    inputs: List[Tensor],
    cuda_graphs_pool: Tuple[int, int],
    device_index: int,
    stack_traces: Optional[StackTraces],
    stream: torch.cuda.Stream,
):
    """Record `wrapped_function` into a CUDA graph as a new node of the tree.

    NB: this constructor performs the actual recording (see _record below) and
    replays the graph once at the end, so statement order here is load-bearing.
    `inputs` is consumed (cleared) during recording.
    """
    assert isinstance(inputs, (list, tuple))

    self.wrapped_function = wrapped_function
    self.id = id
    self.device = device_index
    self.stack_traces = stack_traces
    self.stream = stream

    # if this is a root parent will be None. use weakref to prevent reference cycle
    self._parent = weakref.ref(parent) if parent is not None else None
    # reference to the shared memory pool for the entire cuda graphs tree
    self.cuda_graphs_pool = cuda_graphs_pool

    # A single wrapped function may be recorded multiple times if memory patterns or
    # invariants change from one execution to the next
    self.children: Dict[FunctionID, List[CUDAGraphNode]] = defaultdict(list)

    # StorageWeakRef maintains whether the Storage C++ object remains allocated,
    # not whether the corresponding memory has been deallocated. In order
    # to use them to track memory deallocations we must maintain a single StorageWeakRef
    # for all Storages that reference that memory (even if we are constructing Storages
    # that do not have a deallocator function). We maintain one single storage_cache
    # as we execute any tree path. When we retrieve a storage from the cache we
    # check that it is still alive, and we hash based on observed recording data ptr
    # and storage cdata.

    # we preserve a single reference to executed outputs that is then referenced
    # in children to avoid children having to chase parent pointers in the hot path
    # DO NOT reassign output_weakrefs, only call `clear()`
    # Path is a series of nodes from root to the current node
    self.outputs_weakrefs: OutputList[Optional[StorageWeakRefWrapper]] = []
    self.path_weakrefs: LevelList[OutputList[Optional[StorageWeakRefWrapper]]] = [
        node.outputs_weakrefs for node in self._path_from_root
    ]
    self.path_stacktraces: LevelList[StackTraces] = [
        node.stack_traces for node in self._path_from_root
    ]
    self.tensor_weakrefs: OutputList[Optional[TensorWeakRef]] = []

    # tensors which are outputs of previous graphs in the tree
    self.cudagraph_managed_idxs: List[int] = [
        idx
        for idx, t in enumerate(inputs)
        if isinstance(t, torch.Tensor) and self._is_cuda_graph_recorded_tensor(t)
    ]

    # static inputs = caller-declared static inputs plus tree-managed ones
    self.static_input_idxs: List[int] = list(
        set(wrapped_function.static_input_idxs) | set(self.cudagraph_managed_idxs)
    )

    # recorded addresses of static inputs; None entries mark non-static inputs
    self.static_input_data_ptrs: InputList[int] = [
        (
            inputs[i].data_ptr()
            if isinstance(inputs[i], torch.Tensor) and i in self.static_input_idxs
            else None
        )
        for i in range(len(inputs))
    ]

    # When we checkpoint, and free generations, we will be manually freeing the outputs
    # of CUDAGraphNodes. We should not be freeing parameters, not do we need to account for
    # their liveness (they are static), so we need to compute which outputs are aliases of
    # parameters. Some static inputs are saved tensors from the forward that die in the backward.
    # Their locations are static but lifetimes are not. We only include the persistent static
    # data ptrs below because the non persistent data ptrs may be outputs of this record and
    # fresh allocations.

    # precompute expanded dims to avoid computing in the hot path
    self.expanded_dims: List[List[int]] = [
        get_expanded_dims(x)
        if isinstance(x, torch.Tensor) and idx not in self.static_input_idxs
        else []
        for idx, x in enumerate(inputs)
    ]

    # For each node in path, which outputs were observed to be live
    # before invoking graph recording, and after graph recording
    self.recorded_liveness_before_graph: LevelList[OutputList[bool]] = []
    self.recorded_liveness_after_graph: LevelList[OutputList[bool]] = []

    # List of Tuples of (depth, output_index) that index into node at depth
    # number of nodes from root and output_index of outputs. Will index into
    # path_weakrefs.
    self.expected_dead_indices_before_graph: List[PathOutputIndex] = []
    self.expected_dead_indices_after_graph: List[PathOutputIndex] = []

    # all live indices after graph recording
    self.live_indices_after_graph: List[PathOutputIndex] = []

    if self.parent is not None:
        previous_liveness = self.parent.recorded_liveness_after_graph
        curr_liveness = self._get_liveness(self.path_weakrefs)

        # outputs that died between the parent's recording and now
        different_indices = self._get_different_indices(
            previous_liveness, curr_liveness
        )

        self.recorded_liveness_before_graph = curr_liveness
        self.expected_dead_indices_before_graph = different_indices

    recording_inputs = self._allocate_and_copy_recording_inputs(inputs)
    # recording inputs will copy over memory, so we can free non recording inputs
    inputs.clear()
    del inputs

    # graph used for recording model invocation
    self.graph = torch.cuda.CUDAGraph()

    # we allocate non-static inputs within the same memory pool as the CUDAGraph
    # which we will record the model with. For memory efficiency, it is important
    # to reclaim the input memory when the inputs are no longer live. To accomplish this,
    # we reconstruct tensors at the correct data pointers of our inputs which are
    # non owning and do not prevent deallocation. On subsequent executions, input values
    # will be copied over to these tensors.
    self.reconstructed_inputs: InputList[Tensor] = [
        self._reconstruct_from_tensor_metadata(self._tensor_metadata(x))
        if isinstance(x, torch.Tensor)
        else x
        for x in recording_inputs
    ]

    # DO THE RECORDING!!!
    # We record the CUDA graph in the constructor of CUDAGraphNode, which
    # gives you what the CPU side compute of the function would do. We
    # don't throw the recording outputs away: their memory is
    # correctly accounted for in the CUDAGraphs caching allocator. This
    # means on the very FIRST run of the CUDA graph node, we can directly
    # do more recording, because we have a valid caching allocator state.
    # NB: This relies on run() being called immediately after the
    # constructor, otherwise this optimization would not be valid.

    # initialized below in _record

    self.checkpointed_caching_state: Optional[AllocatorState] = None

    # Output Storage Alias information, can be:
    # - A new, unaliased storage, or the output is None
    # - An alias of an output of a prior graph
    # - An alias of an output already created in the reconstructed outputs
    # This is None if the output in question is an int
    self.output_storage_alias: OutputList[Optional[OutputAliasInfo]] = []

    # is the output Storage unaliased in subsequent outputs, of all subsequent paths
    # if it is, we cached the output tensor and adjust storage liveness tracking to also
    # check if the output tensor does not have an additional python reference.
    # If a descendent node discovers it has an alias of a prior output, then the output
    # will no longer be cached in the ancestor.
    # The large majority of tensors are unaliased, and preserving aliased output tensors would add
    # significant additional complexity with marginal gains
    # The cached tensor outputs are added on the first execution, and cleared whenever we need
    # to do subsequent recording
    self.unaliased_in_all_paths: OutputList[bool] = []
    self.cached_tensor_outputs: OutputList[Optional[Tensor]] = []

    # if an output aliases a static, persistent input then the corresponding Tensor will
    # be set here. These are different than cached tensors, because they are tensors that
    # are aliases of parameters that are always live.
    self.static_output_tensors: OutputList[Optional[Tensor]] = []

    # Cleared after recording
    self.recording_outputs: Optional[
        OutputList[Union[torch.Tensor, int]]
    ] = self._record(wrapped_function.model, recording_inputs)
    self.outputs_metadata: OutputList[Union[Dict[str, Any], int, None]] = []

    # As with inputs, we do not want to keep the outputs permanently alive because that would prevent
    # their memory being reclaimed in subsequent cuda graph recordings. We record the tensor metadata
    # needed to reconstruct instead.
    assert self.recording_outputs is not None
    for out in self.recording_outputs:
        if isinstance(out, torch.Tensor):
            self.outputs_metadata.append(
                self._tensor_metadata(out, ignore_storage_offset=False)
            )
        else:
            # non-tensor outputs are recorded verbatim (ints / None)
            assert isinstance(out, (int, type(None))), type(out)
            self.outputs_metadata.append(out)

    # replay once so the first run() can return the recording outputs directly
    self.graph.replay()
|
| 909 |
+
|
| 910 |
+
def _copy_input(self, idx, dst, src):
    """Copy ``src`` into ``dst`` for input ``idx``, indexing away any
    precomputed expanded (broadcast) dims so only real memory is copied."""
    dims = self.expanded_dims[idx]
    narrowed_dst = index_expanded_dims(dst, dims)
    narrowed_src = index_expanded_dims(src, dims)
    # TODO - one jit kernel across multiple inputs
    narrowed_dst.copy_(narrowed_src)
|
| 916 |
+
|
| 917 |
+
def run_first_inputs(self, new_inputs):
    """Return the outputs produced during recording (one-shot).

    The graph was already invoked inside __init__, so there is nothing to
    execute here; we just hand back and drop the recording outputs.
    """
    if config.triton.fast_path_cudagraph_asserts:
        self.debug_check_invariants_before_invocation()

    # graph is already invoked in the __init__
    # inputs are copied over in _allocate_recording_inputs and subsequently cleared
    assert len(new_inputs) == 0
    first_outputs, self.recording_outputs = self.recording_outputs, None
    return first_outputs
|
| 927 |
+
|
| 928 |
+
def run(self, new_inputs):
    """Copy non-static inputs into the recorded input buffers, replay the
    captured graph, and reconstruct outputs from their saved metadata."""
    if config.triton.fast_path_cudagraph_asserts:
        self.debug_check_invariants_before_invocation()

    assert len(self.static_input_data_ptrs) == len(new_inputs)
    # NB: this ranges over non-static inputs too
    for idx, expected_ptr in enumerate(self.static_input_data_ptrs):
        if idx in self.cudagraph_managed_idxs:
            # owned by a prior graph in the tree; nothing to copy
            continue
        inp = new_inputs[idx]
        if not isinstance(inp, torch.Tensor):
            continue
        if expected_ptr is not None:
            # static input, e.g., parameter: its address must not have moved
            assert expected_ptr == inp.data_ptr()
        else:
            # non-static input, need to copy it into CUDA graph memory
            self._copy_input(idx, self.reconstructed_inputs[idx], inp)

    new_inputs.clear()
    self.run_graph()

    outputs = self.reconstruct_outputs()
    self.debug_check_invariants_after_invocation()

    return outputs
|
| 955 |
+
|
| 956 |
+
def reconstruct_outputs(self):
    "Reconstruct output tensors according to their saved metadata and alias information"

    # Cached tensors will not yet be set on the first execution
    # They are also cleared in checkpointing, so if we checkpoint this node
    # and then execute it again we will need to repopulate cached tensors
    if not self.cached_tensor_outputs:
        self._initialize_cached_tensors()

    # may also hold ints / None for non-tensor outputs (annotation improved)
    outputs: List[Union[torch.Tensor, int, None]] = []

    for i, (storage_info, metadata) in enumerate(
        zip(self.output_storage_alias, self.outputs_metadata)
    ):
        if not isinstance(metadata, dict):  # tensor metadata
            # non-tensor output: return the recorded int / None verbatim
            assert isinstance(metadata, (int, type(None)))
            outputs.append(metadata)
            continue

        cached_t = self.cached_tensor_outputs[i]
        if cached_t is not None:
            # No need to update weakrefs, already correctly initialized
            outputs.append(cached_t)
            continue

        static_t = self.static_output_tensors[i]
        if static_t is not None:
            # alias of a persistent static input (e.g. parameter);
            # lifetime is not tracked, so no weakref exists for it
            assert self.outputs_weakrefs[i] is None
            outputs.append(static_t)
            continue

        storage = self.prepare_alias_info_for_tensor_construction(
            storage_info, metadata
        )

        if isinstance(storage, UntypedStorage) or storage is None:
            out = self._reconstruct_from_tensor_metadata(metadata, storage)
        else:
            # an int is an index into `outputs` whose storage this one aliases
            assert isinstance(storage, int)
            out = self._reconstruct_from_tensor_metadata(
                metadata, outputs[storage].untyped_storage()
            )

        outputs.append(out)
        w = self.outputs_weakrefs[i]
        assert w is not None
        # repoint the recorded weakref at the freshly constructed storage
        w.swap_weakref(out.untyped_storage()._weak_ref())

    return outputs
|
| 1005 |
+
|
| 1006 |
+
def prepare_alias_info_for_tensor_construction(
    self,
    out_alias_info: Optional[OutputAliasInfo],
    metadata: Union[Dict[str, Any], int, None],
) -> Union[UntypedStorage, None, int]:
    """Resolve one output's alias info to what its reconstruction needs:
    None (fresh storage / non-tensor), an UntypedStorage from a prior
    graph's output, or an int index into the outputs being built."""
    is_non_tensor = isinstance(metadata, (int, type(None)))
    if is_non_tensor or out_alias_info is UnaliasedStorage:
        return None

    if isinstance(out_alias_info, AliasesPriorGraphOutput):
        depth, prior_output_index = out_alias_info.index
        prior_ref = self.path_weakrefs[depth][prior_output_index]
        assert prior_ref is not None
        return torch.UntypedStorage._new_with_weak_ptr(prior_ref())

    assert isinstance(out_alias_info, AliasesNewOutput)
    return out_alias_info.index
|
| 1025 |
+
|
| 1026 |
+
def prepare_storages_for_construction(
    self,
) -> List[Union[UntypedStorage, None, int]]:
    """Resolve alias info for every output in one pass; see
    prepare_alias_info_for_tensor_construction for the element meaning."""
    return [
        self.prepare_alias_info_for_tensor_construction(alias_info, metadata)
        for alias_info, metadata in zip(
            self.output_storage_alias, self.outputs_metadata
        )
    ]
|
| 1040 |
+
|
| 1041 |
+
def run_graph(self):
    # Replay the captured CUDA graph; input copies must already be in place.
    self.graph.replay()
|
| 1043 |
+
|
| 1044 |
+
def all_outputs_are_dead(self):
    "All outputs of the path from this node to its root are dead"
    return not any(
        is_live(self.path_weakrefs[depth][output_index])
        for depth, output_index in self.live_indices_after_graph
    )
|
| 1050 |
+
|
| 1051 |
+
def _record(self, model, inputs):
    """Record `model(inputs)` into self.graph within the shared cuda graph
    pool, then register the first outputs. Returns the recording outputs
    (a list/tuple), whose memory lives in the pool."""

    # see: output_is_alias_of_persistent_static_inputs above
    static_input_persistent_storage_ptrs: Dict[int, StorageWeakRefWrapper] = {
        inputs[i].untyped_storage().data_ptr(): StorageWeakRefWrapper(inputs[i])
        for i in self.wrapped_function.static_input_idxs
        if isinstance(inputs[i], torch.Tensor)
        and not self._is_cuda_graph_recorded_tensor(inputs[i])
    }

    if config.triton.slow_path_cudagraph_asserts:
        # need to use parent live weakrefs because live_indices isnt set yet
        memory = (
            [] if self.parent is None else list(self.parent.path_live_weakrefs())
        )
        # plus the non-static recording inputs we copied into the pool
        memory += [
            StorageWeakRefWrapper(elem)
            for i, elem in enumerate(inputs)
            if isinstance(elem, torch.Tensor)
            and i not in self.wrapped_function.static_input_idxs
            and elem.data_ptr() != 0
        ]
        check_memory_pool(self.device, self.cuda_graphs_pool, memory)

    # capture into self.graph on our stream, allocating from the shared pool;
    # RNG state is preserved so recording does not perturb later runs
    with preserve_rng_state(), torch.cuda.device(
        self.device
    ), clear_cublas_manager(), torch.cuda.graph(
        self.graph,
        stream=self.stream,
        pool=self.cuda_graphs_pool,
        capture_error_mode="thread_local",
    ), get_history_recording():
        static_outputs = model(inputs)

    # running model should reclaim memory
    assert len(inputs) == 0

    if not isinstance(static_outputs, (list, tuple)):
        static_outputs = (static_outputs,)

    self._add_first_outputs(static_outputs, static_input_persistent_storage_ptrs)

    return static_outputs
|
| 1095 |
+
|
| 1096 |
+
def _add_first_outputs(
    self,
    outputs,
    static_input_persistent_storage_ptrs: Dict[int, StorageWeakRefWrapper],
):
    "Add the outputs from the first invocation of the node and set up metadata"

    # getting liveness before we have added the outputs to path, so the length
    # of the two lists is equal
    prev_liveness = self.recorded_liveness_before_graph
    curr_liveness = self._get_liveness(self.path_weakrefs)

    delta = self._get_different_indices(prev_liveness, curr_liveness)
    self.expected_dead_indices_after_graph = delta

    assert len(self.outputs_weakrefs) == 0
    # index from data pointer to index in outputs
    output_new_storages_index: Dict[StorageDataPtr, int] = {}

    self.unaliased_in_all_paths = [False for _ in range(len(outputs))]
    self.static_output_tensors = [None for _ in range(len(outputs))]

    for i, o in enumerate(outputs):
        if o is None or not isinstance(o, torch.Tensor):
            self.output_storage_alias.append(UnaliasedStorage)
            continue

        # FIX: this call previously carried a trailing comma, wrapping the
        # (None) result of torch._check in a pointless one-element tuple.
        torch._check(
            o.is_cuda,
            lambda: (
                "Expected all cuda outputs in cuda graph recording. Non cuda output "
                f"from {self.stack_traces[i] if self.stack_traces else '(unknown)'}"
            ),
        )

        ref = static_input_persistent_storage_ptrs.get(
            o.untyped_storage().data_ptr(), None
        )
        # also treat empty storages as static outputs because we do not need to manage their lifetime
        # and they should not participate in checkpointing
        is_empty_storage = o.data_ptr() == 0
        if ref and ref() is not None or is_empty_storage:
            self.output_storage_alias.append(None)
            self.static_output_tensors[i] = o
            continue

        path_ref = self._is_alias_of_live_recorded_tensor(o)
        if path_ref is not None:
            self._mark_prior_graph_output_as_aliased(path_ref)
            self.output_storage_alias.append(AliasesPriorGraphOutput(path_ref))
            continue

        if o.untyped_storage().data_ptr() in output_new_storages_index:
            index = output_new_storages_index[o.untyped_storage().data_ptr()]
            self.unaliased_in_all_paths[index] = False
            self.output_storage_alias.append(AliasesNewOutput(index))
            continue

        output_new_storages_index[o.untyped_storage().data_ptr()] = i
        self.output_storage_alias.append(UnaliasedStorage)
        self.unaliased_in_all_paths[i] = True

    if self.stack_traces is None:
        self.stack_traces = [None for _ in range(len(outputs))]
    else:
        assert len(self.stack_traces) == len(
            outputs
        ), "Wrong number of stack traces passed in"

    assert not self.outputs_weakrefs
    for out, static_output_tensor in zip(outputs, self.static_output_tensors):
        if not isinstance(out, torch.Tensor) or static_output_tensor is not None:
            self.outputs_weakrefs.append(None)
            self.tensor_weakrefs.append(None)
        else:
            self.outputs_weakrefs.append(StorageWeakRefWrapper(out))
            self.tensor_weakrefs.append(TensorWeakRef(out))

    self.recorded_liveness_after_graph = self._get_liveness(self.path_weakrefs)
    self.checkpointed_caching_state = torch._C._cuda_getCheckpointState(
        self.device, self.cuda_graphs_pool
    )

    # now, get liveness with outputs added
    for depth in range(len(self.path_weakrefs)):
        for output_index in range(len(self.path_weakrefs[depth])):
            if is_live(self.path_weakrefs[depth][output_index]):
                self.live_indices_after_graph.append((depth, output_index))

    self.debug_check_invariants_after_invocation()
    if config.triton.slow_path_cudagraph_asserts:
        check_memory_pool(
            self.device, self.cuda_graphs_pool, list(self.path_live_weakrefs())
        )
def _mark_prior_graph_output_as_aliased(self, index: PathOutputIndex):
    "Remove a graph output from the unaliased, cached tensors in an ancestor node"
    depth, output_index = index
    ancestors = list(self._path_from_root)
    ancestors[depth].unaliased_in_all_paths[output_index] = False
    wrapper = self.path_weakrefs[depth][output_index]
    assert wrapper is not None
    wrapper.remove_extra_reference()
def _initialize_cached_tensors(self):
    # we should not be clearing output_weakrefs, and they should be set in the first
    # record run
    assert len(self.outputs_weakrefs) == len(self.outputs_metadata)

    for i, (storage_info, metadata, make_cached) in enumerate(
        zip(
            self.output_storage_alias,
            self.outputs_metadata,
            self.unaliased_in_all_paths,
        )
    ):
        if not make_cached:
            self.cached_tensor_outputs.append(None)
            continue

        assert storage_info is UnaliasedStorage
        assert isinstance(metadata, dict)
        storage = self.create_storage(metadata)
        cached = self._reconstruct_from_tensor_metadata(metadata, storage=storage)

        # XXX: let autograd know that there will be an additional reference to the tensor
        # that can be ignored when deciding whether to do gradient buffer inplacing.
        # Otherwise, inplacing could differ between tracing and subsequent execution.
        # For some models we tested this led to inputs no longer being in cudagraph pools,
        # leading to spurious re-recordings.
        # It also tells AMP cache that even though the tensor impls cannot be cached
        # in dtype conversions.
        torch._C._add_cached_tensor(cached)

        self_ref = weakref.ref(self)

        # one reference in our array, and calling sys.getrefcount bumps the refcount by one
        def check_refcount(index):
            manager = self_ref()
            if manager is None:
                return False
            return manager.get_output_refcount(index) == 2

        check = functools.partial(check_refcount, index=i)

        self.outputs_weakrefs[i] = StorageWeakRefWrapper(cached, extra_ref_check=check)
        self.cached_tensor_outputs.append(cached)
def get_output_refcount(self, index):
    # Return the refcount of the cached output tensor at `index`.
    # NB: do not bind the tensor to a local first -- that would add an extra
    # reference and skew the count callers compare against.
    return sys.getrefcount(self.cached_tensor_outputs[index])
@property
def parent(self):
    "unwraps the weakref to _parent"
    if self._parent is None:
        return None
    return self._parent()
@property
def _path_to_root(self):
    "Returns all nodes in the path starting at self and ending at root"
    cursor = self
    while cursor:
        yield cursor
        cursor = cursor.parent
@property
def _path_from_root(self):
    "Returns all nodes in the path starting at the root and ending at self"
    yield from reversed(list(self._path_to_root))
def _is_cuda_graph_recorded_tensor(self, t: torch.Tensor):
    "Is this tensor an output of a node in this path"
    query_ptr = t.untyped_storage().data_ptr()
    for output_refs in self.path_weakrefs:
        for storage_weak_ref in output_refs:
            if storage_weak_ref is None:
                continue
            # don't need to check liveness of storage since the cuda graph managed
            # memory is never released.
            if storage_weak_ref.data_ptr() == query_ptr:
                return True
    return False
def _is_alias_of_live_recorded_tensor(
    self, t: torch.Tensor
) -> Optional[PathOutputIndex]:
    # Locate a live recorded output whose storage data pointer matches `t`,
    # returning its (depth, output_index) in the path, else None.
    target_ptr = t.untyped_storage().data_ptr()
    for depth, output_refs in enumerate(self.path_weakrefs):
        for output_index, storage_ref in enumerate(output_refs):
            deref = maybe_deref(storage_ref)
            if deref is None:
                continue
            _storage, ptr = deref
            if ptr == target_ptr:
                return (depth, output_index)
    return None
@staticmethod
def _check_liveness(
    indices: List[PathOutputIndex],
    output_refs: List[List[Optional[StorageWeakRefWrapper]]],
):
    "Check that all of the indices specified are dead references"
    for depth, output_index in indices:
        wrapper = output_refs[depth][output_index]
        assert wrapper is not None
        if wrapper() is not None:
            # still alive -> liveness pattern diverged
            return False
    return True
def add_child(self, function_id: FunctionID, node: CUDAGraphNode):
    "Adds node as a child of self"
    self.children[function_id].append(node)
@staticmethod
def _get_different_indices(
    prev: List[List[bool]], curr: List[List[bool]]
) -> List[PathOutputIndex]:
    "Find indices where the two lists differ."
    assert len(prev) <= len(curr)
    changed = []
    for depth, (before, after) in enumerate(zip(prev, curr)):
        assert len(before) == len(after)
        changed.extend(
            (depth, idx)
            for idx, (b, a) in enumerate(zip(before, after))
            if b != a
        )
    return changed
@staticmethod
def _get_liveness(
    weakrefs: List[List[Optional[StorageWeakRefWrapper]]],
) -> List[List[bool]]:
    "Maps weakrefs to true if the reference is alive and false otherwise"
    if not weakrefs:
        return []
    return [pytree.tree_map(is_live, outputs) for outputs in weakrefs]
def debug_assert_invariants(
    self, expected_liveness: List[List[bool]], newly_dead: List[PathOutputIndex]
):
    # Expensive consistency checks; gated behind the fast-path assert config.
    if not config.triton.fast_path_cudagraph_asserts:
        return

    for i, node in enumerate(self._path_from_root):
        assert self.path_weakrefs[i] is node.outputs_weakrefs

    nodes = list(self._path_from_root)

    live_blocks = get_block_addrs(self.cuda_graphs_pool)

    live_storage_data_ptrs = set()
    live_storage_weak_ptrs = set()

    for depth, outputs_liveness in enumerate(expected_liveness):
        for output_idx, output_liveness in enumerate(outputs_liveness):
            # tensor can die early, but it can't be alive when it should be dead
            wrapper = self.path_weakrefs[depth][output_idx]
            deref = maybe_deref(wrapper)
            if deref is not None:
                assert output_liveness
                stor_weak_ptr, stor_data_ptr = deref
                # the two sets must stay in lockstep
                assert (stor_data_ptr in live_storage_data_ptrs) == (
                    stor_weak_ptr in live_storage_weak_ptrs
                )
                live_storage_data_ptrs.add(stor_data_ptr)
                live_storage_weak_ptrs.add(stor_weak_ptr)

                is_persistent_alias = (
                    nodes[depth].static_output_tensors[output_idx] is not None
                )

                if is_persistent_alias:
                    assert stor_data_ptr not in live_blocks

    for depth, output_index in newly_dead:
        assert not is_live(self.path_weakrefs[depth][output_index])
def debug_check_invariants_before_invocation(self):
    # pre-invocation: the indices expected dead *before* the graph must be dead
    self.debug_assert_invariants(
        self.recorded_liveness_before_graph, self.expected_dead_indices_before_graph
    )
def debug_check_invariants_after_invocation(self):
    # post-invocation: the indices expected dead *after* the graph must be dead
    self.debug_assert_invariants(
        self.recorded_liveness_before_graph, self.expected_dead_indices_after_graph
    )
def data_ptrs_dead_since_invocation(self) -> List[int]:
    """
    Since this node was invoked, return data ptrs of all tensor outputs that have died
    in the current executing tree path.
    """
    curr_liveness = self._get_liveness(self.path_weakrefs)
    # FIX: this local was previously named `_get_different_indices`, shadowing
    # the method of the same name; renamed for clarity.
    newly_dead_indices = self._get_different_indices(
        self.recorded_liveness_after_graph, curr_liveness
    )

    path = list(self._path_from_root)
    ptrs_to_deallocate = []
    for depth, output_index in newly_dead_indices:
        ptrs_to_deallocate.append(
            path[depth].outputs_metadata[output_index]["data_ptr"]
        )

    return ptrs_to_deallocate
def path_live_weakrefs(self) -> Iterator[StorageWeakRefWrapper]:
    # yield only the still-live recorded outputs along this path
    for depth, output_index in self.live_indices_after_graph:
        candidate = self.path_weakrefs[depth][output_index]
        if candidate is not None and is_live(candidate):
            yield candidate
def remove_node_cached_tensors(self):
    # drop our extra references so the caching allocator can reuse the memory
    for cached in self.cached_tensor_outputs:
        if cached is not None:
            torch._C._remove_cached_tensor(cached)
    self.cached_tensor_outputs.clear()

    for idx, unaliased in enumerate(self.unaliased_in_all_paths):
        if not unaliased:
            continue
        wrapper = self.outputs_weakrefs[idx]
        assert wrapper is not None
        wrapper.remove_extra_reference()
def remove_path_cached_tensors(self):
    # clear cached tensors on every node from the root down to self
    for node in self._path_from_root:
        node.remove_node_cached_tensors()
def clear_path_state(self):
    "Clear the path state in this current executing node"
    # intentionally a no-op for now; kept so callers have a stable hook
    pass
@staticmethod
def _tensor_metadata(x, ignore_storage_offset=True):
    assert isinstance(x, torch.Tensor)
    # We ignore the storage offset for inputs, but not for outputs
    # TODO: - should we make the storage resizable ?
    storage = x.untyped_storage()
    return {
        "nbytes": storage.nbytes(),
        "data_ptr": storage.data_ptr(),
        "size": x.shape,
        "stride": x.stride(),
        "dtype": x.dtype,
        "device": x.device,
        "storage_offset": 0 if ignore_storage_offset else x.storage_offset(),
    }
def _reconstruct_from_tensor_metadata(
    self, metadata: Dict[str, Any], storage=None
) -> Tensor:
    # build (or reuse) a storage for the recorded data pointer, then rewrap it
    # as a tensor with the recorded size/stride/dtype metadata
    if storage is None:
        storage = self.create_storage(metadata)
    return torch._C._construct_CUDA_Tensor_From_Storage_And_Metadata(metadata, storage)
def create_storage(self, metadata):
    # wrap the raw device pointer recorded at capture time in an untyped storage
    return torch._C._construct_storage_from_data_pointer(
        metadata["data_ptr"], metadata["device"], metadata["nbytes"]
    )
def _allocate_and_copy_recording_inputs(
    self, inputs
) -> List[Union[torch.Tensor, int]]:
    """
    Allocate inputs for non static, non cudagraph managed tensors in the memory pool
    and copy over the tensor values.
    """

    torch.cuda.synchronize()
    self.stream.wait_stream(torch.cuda.current_stream())
    recording_inputs = []

    with warnings.catch_warnings(record=True), torch.cuda.device(
        self.device
    ), _use_cuda_memory_pool_manager(
        self.device,
        mem_pool=self.cuda_graphs_pool,
        stream=self.stream,
    ):
        for i, inp in enumerate(inputs):
            if not isinstance(inp, torch.Tensor):
                # ints (symbolic sizes) pass through unchanged
                assert isinstance(inp, int)
                recording_inputs.append(inp)
            elif i in self.static_input_idxs:
                # static inputs keep their original storage
                recording_inputs.append(inp)
            else:
                # static_input does an allocation!
                recording_inputs.append(static_input(inp))
                # copy over and clear non recording input
                self._copy_input(i, recording_inputs[-1], inp)
                inputs[i] = None
                del inp

    return recording_inputs
def check_invariants(self, inputs: List[Tensor]) -> bool:
|
| 1492 |
+
"""
|
| 1493 |
+
Checks if this node can be run. The same pattern of tensor liveness and tensors
|
| 1494 |
+
managed in the cudagraph private pool must remain stable.
|
| 1495 |
+
"""
|
| 1496 |
+
|
| 1497 |
+
# previously managed data pointers remain stable
|
| 1498 |
+
for idx in self.cudagraph_managed_idxs:
|
| 1499 |
+
if inputs[idx].data_ptr() != self.static_input_data_ptrs[idx]:
|
| 1500 |
+
return False
|
| 1501 |
+
|
| 1502 |
+
if not self._check_liveness(
|
| 1503 |
+
self.expected_dead_indices_before_graph, self.path_weakrefs
|
| 1504 |
+
):
|
| 1505 |
+
return False
|
| 1506 |
+
|
| 1507 |
+
# the cudagraph managed tensors which died upon recording must also die upon
|
| 1508 |
+
# this invocation. it is too late to check after we've replayed the graph,
|
| 1509 |
+
# because we would have already written over their memory.
|
| 1510 |
+
for idx in self.cudagraph_managed_idxs:
|
| 1511 |
+
inputs[idx] = None
|
| 1512 |
+
|
| 1513 |
+
torch._check(
|
| 1514 |
+
self._check_liveness(
|
| 1515 |
+
self.expected_dead_indices_after_graph, self.path_weakrefs
|
| 1516 |
+
),
|
| 1517 |
+
lambda: "TODO: graph recording observed an input tensor deallocate during graph "
|
| 1518 |
+
" recording that did not occur during replay. Please file an issue.",
|
| 1519 |
+
)
|
| 1520 |
+
return True
|
| 1521 |
+
|
| 1522 |
+
def num_descendants(self) -> int:
    "Total number of descendents of this node"
    total = 0
    for children in self.children.values():
        for child in children:
            # count the child itself plus everything below it
            total += 1 + child.num_descendants()
    return total
def get_cudagraph_segments(pool_id):
    # filter the global allocator snapshot down to segments owned by `pool_id`
    snapshot = torch.cuda.memory_snapshot()
    return [seg for seg in snapshot if seg["segment_pool_id"] == pool_id]
def get_block_addrs(pool_id, live_only=True):
    # Walk each segment, accumulating the start address of every (optionally
    # only live) block; addresses advance by block size within a segment.
    addrs = []
    for segment in get_cudagraph_segments(pool_id):
        cursor = segment["address"]
        for block in segment["blocks"]:
            if block["state"] == "active_allocated" or not live_only:
                addrs.append(cursor)
            cursor += block["size"]
    return addrs
def format_tb(frames):
    # convert snapshot frame dicts into a printable python-style traceback
    summaries = [
        traceback.FrameSummary(frame["filename"], frame["line"], frame["name"])
        for frame in frames
    ]
    return "".join(traceback.format_list(summaries))
def check_memory_pool(device, pool_id, live_storages_ptrs: List[StorageWeakRefWrapper]):
    """
    Verify that the storages cudagraph trees believes are live exactly match the
    active allocations in the given private memory pool; raise otherwise.
    """
    assert all(
        isinstance(elem, StorageWeakRefWrapper) for elem in live_storages_ptrs
    )  # noqa: C419
    unique_storages = {stor.data_ptr() for stor in live_storages_ptrs if stor()}

    # check if there is a divergence first, then do the expensive snapshot call after
    # we know it will error
    if torch._C._cuda_checkPoolLiveAllocations(device, pool_id, unique_storages):
        return

    # at this point we are past the fast-path. we have seen rare cases where a dead tensor is dead,
    # but hasn't been gc'd yet, and gives false positive for allocated_not_in_live_storages
    gc.collect()

    segments = get_cudagraph_segments(pool_id)

    allocated_not_in_live_storages = {}

    for segment in segments:
        addr = segment["address"]
        for block in segment["blocks"]:
            if block["state"] == "active_allocated":
                if addr not in unique_storages:
                    allocated_not_in_live_storages[addr] = block
                else:
                    unique_storages.remove(addr)

            addr += block["size"]

    torch._check(
        len(unique_storages) == 0,
        lambda: f"These storage data ptrs are not allocated in pool {pool_id} but should be {unique_storages}",
    )

    # FIX: was `allocated_not_in_live_storages != 0` -- a dict never equals 0,
    # so the condition was always True and the error fired even for an empty dict.
    if allocated_not_in_live_storages:
        formatted = []
        for dp, block in allocated_not_in_live_storages.items():
            trace = format_tb(block.get("frames", []))
            formatted.append(f"Data Pointer: {dp}, history: \n{trace}")
        formatted_s = "\n".join(formatted)
        msg = (
            f"These live storage data ptrs are in the cudagraph pool but not "
            f"accounted for as an output of cudagraph trees: \n\n{formatted_s}"
        )
        raise RuntimeError(msg)
class ExecutionState(Enum):
    """
    Represents the state of the CUDAGraph Tree. Will be None if there is no live current memory allocated
    in the cuda graph pool. Otherwise will reflect the state of the most recently executed node.
    """

    # NB: member order is load-bearing -- auto() assigns values by position.
    NONE = auto()
    WARMUP = auto()
    RECORDING = auto()
    EXECUTION = auto()
class CompilationMode(Enum):
    """Which phase of compilation a cudagraph-wrapped function belongs to."""

    # NB: member order is load-bearing -- auto() assigns values by position.
    FORWARD = auto()
    BACKWARD = auto()
    INFERENCE = auto()
class CUDAGraphTreeManager:
|
| 1629 |
+
"""
|
| 1630 |
+
Groups individual recordings or executions of cuda graphs into a tree of recordings,
|
| 1631 |
+
and checks required invariants, and manages warmups of graphs.
|
| 1632 |
+
|
| 1633 |
+
When graphs are recorded in the same tree, it enforces subsequent execution
|
| 1634 |
+
to follow the same order and have the same output tensor livespans. To remove
|
| 1635 |
+
unnecessary coupling of cuda graphs (and additional imposed invariants),
|
| 1636 |
+
the tree manager will end a currently recording tree whenever it is valid - when
|
| 1637 |
+
the memory pool no longer has any live allocations.
|
| 1638 |
+
|
| 1639 |
+
We ignore outputs from a previous generation that correspond to prior model outputs.
|
| 1640 |
+
Currently this is hardcoded `GenerationTracker.generation` tracked in torch dynamo.
|
| 1641 |
+
# TODO: make generation increment configurable, warn on overwrite.
|
| 1642 |
+
|
| 1643 |
+
We run graph warmups in the cudagraph memory pool and return the result on the first invocation
|
| 1644 |
+
of a function. For many models it is important to reclaim activations as you run the backward.
|
| 1645 |
+
If we were to warm up the model and keep an extra copy of the inputs around to subsequently
|
| 1646 |
+
use for recording, we would incur a memory penalty. Additionally, if we are part way through training
|
| 1647 |
+
your model and need to recompile, memory will be allocated to the cuda graph pool, so we run this
|
| 1648 |
+
warmup run in the cuda graph memory pool. As for recording, warm up needs the state of live tensors
|
| 1649 |
+
to be accurately reflected so we checkpoint the allocator state if we need to warm up following graph
|
| 1650 |
+
replay.
|
| 1651 |
+
"""
|
| 1652 |
+
|
| 1653 |
+
def __init__(self, device_index: int):
    # roots are functions which have no dependencies on an other node. I.e.,
    # when they are first invoked, none of their inputs are outputs are outputs
    # of another node, nor are there any live outputs of another node whose
    # liveness would create a dependency.
    self.roots: Dict[FunctionID, List[CUDAGraphNode]] = defaultdict(list)

    # mapping from function id to wrapped function
    self.ids_to_funcs: Dict[FunctionID, WrappedFunction] = {}

    self.ids_to_stack_traces: Dict[FunctionID, StackTraces] = {}

    self.warmed_up_functions: Set[FunctionID] = set()
    # if we fail to increment generation, and are stuck warming up,
    # only warn on each function once
    self.warned_functions: Set[FunctionID] = set()
    torch._C._set_cached_tensors_enabled(True)

    # NB: cuda caching allocator will remember the stream a segment is allocated to
    # and only allocate that segment to the same stream. we need to use a single stream
    # for all allocations to the memory pool, otherwise the allocations to separate streams
    # will not be reused; separate recordings would have use the same memory pool, but not
    # the same memory.

    with torch.cuda.device(device_index):
        torch.cuda.synchronize()
        self.stream = torch.cuda.Stream()
        self.stream.wait_stream(torch.cuda.current_stream())

        # FIX: this handle was previously created twice in a row, with the
        # first handle silently discarded; create it exactly once.
        self.cuda_graphs_thread_pool = torch.cuda.graph_pool_handle()
        # Keeps Memory Pool Alive
        self.graph = torch.cuda.CUDAGraph()

        # warm the pool with an empty capture so the memory pool stays alive
        with warnings.catch_warnings(record=True), torch.cuda.graph(
            self.graph,
            pool=self.cuda_graphs_thread_pool,
            stream=self.stream,
            capture_error_mode="thread_local",
        ):
            pass

    self.graph_counter = itertools.count(0)
    self.func_counter = itertools.count(0)

    # whether we the current node is in a state of warmup, recording, execution. If
    # there is no current node the state will be ExecutionState.None.
    self.path_state = ExecutionState.NONE
    self.device_index = device_index

    # the most recently invoked cudagraph wrapping of a function. Will be None
    # when there is no output from a previous recording or execution whose memory
    # we need to respect in the cuda caching allocation. If you incremented generation,
    # this will also be none, as ignore those allocations.
    self.current_node: Optional[CUDAGraphNode] = None

    # current generation of cudagraph invocations. when torch.compile is run
    # we increment the current generation. are willing to ignore live outputs
    # of a previous generation in checking liveness.
    self.current_gen: int = -1

    # number of instances we are in execution and failed to match to an
    # existing child
    self.debug_fail_counter = 0
    # number of instances we had to checkpoint the function
    self.debug_checkpointing_counter = 0

    self.id_to_mode: Dict[FunctionID, CompilationMode] = {}

    # Note: [Backward Generation Handling]
    # We generally perform a sequence of forward executions followed by backward executions.
    # If multiple torch.compile wrapped forwards are executed with their backwards pending,
    # we should not disregard the outputs from a prior torch.compile since the entire training
    # loop hasn't completed. Occasionally, a backward pass corresponding to a forward pass may
    # not be executed, so we cannot wait for all pending forward pass backward completions, so
    # we cannot wait for all backwards to have been invoked. Instead we wait for a single backward
    # invocation. Triggering a backward pass typically doesn't lead to another torch.compile
    # invocation, making it less likely for the generation to increase between multiple
    # backward calls. The following use case is covered by this approach:
    # mod1 = torch.compile(...)
    # mod2 = torch.compile(...)
    # mod2(mod1(x)).sum().backward()

    self.running_forwards_with_pending_backwards = False
def run(self, new_inputs: List[Tensor], function_id: FunctionID):
    "Run one invocation of `function_id`, updating pending-backward tracking."
    assert self.graph is not None, "Running CUDAGraph after shutdown"
    result = self._run(new_inputs, function_id)

    # The forwards are only pending following invocation, not before
    mode = self.id_to_mode[function_id]
    if mode == CompilationMode.FORWARD:
        self.running_forwards_with_pending_backwards = True
    elif mode == CompilationMode.BACKWARD:
        self.running_forwards_with_pending_backwards = False

    return result
def set_to_running_backward(self):
    # a backward has started: forwards are no longer considered pending
    self.running_forwards_with_pending_backwards = False
    def _run(self, new_inputs: List[Tensor], function_id: FunctionID):
        """
        Core dispatch: decide whether this invocation replays an existing child
        node, warms up eagerly, or records a new CUDA graph — lazily ending any
        in-flight recording/warmup/execution first.
        """
        # we will try to end the current execution lazily, since
        # we dont want to do unnecessary checking of the existing outputs
        # on the hot path, but both recording and warmup only happen once
        # so we check up front
        if self.in_recording:
            self.try_end_curr_recording(function_id)

        if self.in_warmup:
            self.try_end_curr_warmup(function_id)

        # warming up a function and subsequentally recording may use different memory addresses
        # because both depend on the state of the caching allocator. if we warm up graph A,
        # then warm up graph B and make more allocations, the subsequent recording of A will not
        # necessarily use the same addresses as in the warm up. Thus any warm up of a node can only
        # be followed by warm up runs.
        if (
            not (
                function_id in self.warmed_up_functions
                or config.triton.skip_cudagraph_warmup
            )
        ) or self.in_warmup:
            # If we are in the middle of executing cuda graphs, then we need to checkpoint memory state.
            # Both Recording and Warmup will be reflected in the allocator and dont need changes
            if self.path_state == ExecutionState.EXECUTION:
                self.apply_checkpoint_execution_state_in_allocator()

            return self.run_eager(new_inputs, function_id)

        # candidate replay nodes: children of the current node, or roots if detached
        child_nodes = (
            self.roots if self.current_node is None else self.current_node.children
        )

        if not self.in_recording:
            for child in child_nodes[function_id]:
                # here we are checking memory consistency between recording and execution,
                # as well as things like stability of tensor locations, etc
                # and other
                if child.check_invariants(new_inputs):
                    return self.execute_node(child, new_inputs)

            # now that we know the new function can't be run as a child of the
            # current node, if it is a root, try to end the current execution.
            # as noted above, we want to do this lazily to avoid having to
            # check all existing outputs
            if self.current_node is not None and function_id in self.roots:
                self.try_end_curr_execution()

                # run again to hit the root matching case which must succeed
                if self.current_node is None:
                    return self.run(new_inputs, function_id)

            # at this point, we necessarily will do a new recording
            self.debug_fail_counter += 1

        self.try_end_curr_execution()
        if self.current_node is not None:
            self.apply_checkpoint_execution_state_in_allocator()

        # now, we are in a recording state !
        return self.record_function(new_inputs, function_id)
def shutdown(self):
|
| 1818 |
+
"""
|
| 1819 |
+
Remove all cached tensors in all nodes. Because cached tensors can hold gradients which in turn
|
| 1820 |
+
might reference a backward which invokes a CUDA Graph Node, we have to manually clear them on shutdown
|
| 1821 |
+
to avoid a reference cycle.
|
| 1822 |
+
"""
|
| 1823 |
+
nodes = []
|
| 1824 |
+
for roots in self.roots.values():
|
| 1825 |
+
nodes.extend(roots)
|
| 1826 |
+
|
| 1827 |
+
while nodes:
|
| 1828 |
+
node = nodes.pop()
|
| 1829 |
+
for children in node.children.values():
|
| 1830 |
+
nodes.extend(children)
|
| 1831 |
+
node.remove_node_cached_tensors()
|
| 1832 |
+
node.graph = None
|
| 1833 |
+
|
| 1834 |
+
self.graph = None
|
| 1835 |
+
self.roots = None # type: ignore[assignment]
|
| 1836 |
+
self.current_node = None
|
| 1837 |
+
|
| 1838 |
+
    def record_function(self, new_inputs, function_id) -> List[Optional[Tensor]]:
        """Record a new CUDAGraphNode for `function_id` — as a child of the current
        node, or as a root — and return the outputs of its first (recording) run."""
        graph_id = self.new_graph_id()
        log.debug(
            "Recording function %d of graph recording id %d",
            function_id.id,
            graph_id.id,
        )
        # quiesce outstanding GPU work before capture begins
        torch.cuda.synchronize()
        node = CUDAGraphNode(
            self.ids_to_funcs[function_id],
            graph_id,
            self.current_node,
            new_inputs,
            self.cuda_graphs_thread_pool,
            self.device_index,
            self.ids_to_stack_traces[function_id],
            self.stream,
        )
        if self.current_node is None:
            self.roots[function_id].append(node)
        else:
            self.current_node.add_child(function_id, node)
        self.current_node = node
        self.path_state = ExecutionState.RECORDING
        self.update_generation()
        # ensure the capture has fully completed before handing back outputs
        torch.cuda.synchronize()
        return node.run_first_inputs(new_inputs)
    def execute_node(self, node: CUDAGraphNode, new_inputs) -> List[Optional[Tensor]]:
        # Replay an already-recorded node, making it the current position on the path.
        self.current_node = node
        self.path_state = ExecutionState.EXECUTION
        self.update_generation()
        return node.run(new_inputs)
    def run_eager(self, new_inputs, function_id: FunctionID):
        """Run `function_id` outside of a CUDA graph (warmup), wrapping it in a
        CUDAWarmupNode so later recordings respect its live memory."""
        # this is only stored on current node, because when we start a new path,
        # we will deallocate it
        already_warm = function_id in self.warmed_up_functions
        if not already_warm:
            log.debug("Running warmup of function %d", function_id.id)
        else:
            log.debug(
                "Running eager of function %d because ancestor needed to warm up",
                function_id.id,
            )
        self.warmed_up_functions.add(function_id)
        node = CUDAWarmupNode(
            self.ids_to_funcs[function_id],
            self.current_node,
            self.cuda_graphs_thread_pool,
            self.graph,
            self.device_index,
            self.ids_to_stack_traces[function_id],
            self.stream,
            already_warm,
        )
        self.current_node = node
        self.path_state = ExecutionState.WARMUP
        self.update_generation()
        return node.run(new_inputs)
def new_graph_id(self) -> GraphID:
|
| 1900 |
+
return GraphID(next(self.graph_counter))
|
| 1901 |
+
|
| 1902 |
+
def new_func_id(self) -> FunctionID:
|
| 1903 |
+
return FunctionID(next(self.func_counter))
|
| 1904 |
+
|
| 1905 |
+
    def add_function(
        self,
        model,
        inputs,
        static_input_idxs,
        stack_traces,
        mode,
    ) -> Tuple[Callable[..., Any], List[Optional[Tensor]]]:
        """Register a compiled function with the tree and run it once immediately.

        Returns the callable to use for subsequent invocations together with the
        outputs of that first invocation.
        """
        id = self.new_func_id()
        self.ids_to_stack_traces[id] = stack_traces
        self.ids_to_funcs[id] = WrappedFunction(model, static_input_idxs, id)
        self.id_to_mode[id] = mode
        fn = functools.partial(self.run, function_id=id)

        # container needs to set clean up when fn dies
        get_container(self.device_index).add_strong_reference(fn)
        return fn, fn(inputs)
@property
|
| 1924 |
+
def in_recording(self):
|
| 1925 |
+
return self.path_state == ExecutionState.RECORDING
|
| 1926 |
+
|
| 1927 |
+
@property
|
| 1928 |
+
def in_warmup(self):
|
| 1929 |
+
return self.path_state == ExecutionState.WARMUP
|
| 1930 |
+
|
| 1931 |
+
def get_roots(self) -> Iterator[CUDAGraphNode]:
|
| 1932 |
+
for nodes in self.roots.values():
|
| 1933 |
+
yield from nodes
|
| 1934 |
+
|
| 1935 |
+
    @property
    def current_node(self):
        # The node we are currently positioned at on the recording/execution/warmup
        # path, or None when no path is active.
        return self._current_node
    @current_node.setter
    def current_node(self, value):
        self._current_node = value
        # Detaching from the path means no recording/execution/warmup is active.
        if value is None:
            self.path_state = ExecutionState.NONE
    def update_generation(self):
        # Stamp the manager with the generation this invocation belongs to.
        self.current_gen = self.get_curr_generation()
@staticmethod
|
| 1949 |
+
def get_curr_generation() -> int:
|
| 1950 |
+
if MarkStepBox.mark_step_counter != 0:
|
| 1951 |
+
return MarkStepBox.mark_step_counter
|
| 1952 |
+
|
| 1953 |
+
return GenerationTracker.generation
|
| 1954 |
+
|
| 1955 |
+
@staticmethod
|
| 1956 |
+
def user_invoked_mark_step():
|
| 1957 |
+
return MarkStepBox.mark_step_counter != 0
|
| 1958 |
+
|
| 1959 |
+
def can_start_new_generation(self) -> bool:
|
| 1960 |
+
if not self.in_new_torch_compile_invocation():
|
| 1961 |
+
return False
|
| 1962 |
+
|
| 1963 |
+
if self.user_invoked_mark_step():
|
| 1964 |
+
return True
|
| 1965 |
+
|
| 1966 |
+
return not self.running_forwards_with_pending_backwards
|
| 1967 |
+
|
| 1968 |
+
    def in_new_torch_compile_invocation(self):
        # True when the generation has advanced since this manager last ran,
        # i.e. this call belongs to a new torch.compile invocation or mark step.
        return self.current_gen != self.get_curr_generation()
    def try_end_curr_recording(self, function_id: FunctionID) -> None:
        """
        Check if the current recording can be terminated, either because all outputs of the
        previously recorded node are dead or because it was executed in a different
        generation. Will set current_node to None and in_recording to False if successful.
        """
        assert self.in_recording
        assert self.current_node is not None

        # multiple invocations, allow overwriting the previous generation
        if self.can_start_new_generation():
            self.dealloc_current_path_weakrefs()
            self.clear_current_path_state_and_set_to_none()
            return

        if self.current_node.all_outputs_are_dead():
            self.clear_current_path_state_and_set_to_none()
            return

        # Could not end the recording; warn if this looks like a loop that will
        # never reach the fast replay path.
        self.check_warn_on_unable_to_start_executing(function_id)
    def try_end_curr_execution(self) -> None:
        """
        Check if the current executing node can be terminated, either because all outputs of the
        previously executed node are dead or because it was executed in a different generation.
        Will set current_node to None if successful.
        """

        assert not self.in_recording
        if self.current_node is None:
            return

        if self.can_start_new_generation():
            self.clear_current_path_state_and_set_to_none()
            return

        # Outputs already dead: safe to detach without deallocating anything.
        if self.current_node.all_outputs_are_dead():
            self.clear_current_path_state_and_set_to_none()
    def try_end_curr_warmup(self, function_id: FunctionID):
        """Attempt to finish the current warmup path, either because a new
        generation may begin or because all of its outputs are dead."""
        if self.can_start_new_generation():
            self.dealloc_current_path_weakrefs()
            self.current_node = None
            return

        if self.current_node.all_outputs_are_dead():
            self.current_node = None
            return

        self.check_warn_on_unable_to_start_executing(function_id)
    def check_warn_on_unable_to_start_executing(self, function_id: FunctionID):
        """Warn (once per function) if we appear to be in a repeated-invocation
        loop that is unable to hit the cudagraph fast path."""
        if (
            function_id in self.warned_functions
            or not self.in_new_torch_compile_invocation()
        ):
            return

        # prior invocations of this same function along the current path
        existing_nodes = [
            node
            for node in self.current_node._path_from_root
            if node.wrapped_function.id == function_id
        ]

        if len(existing_nodes) <= 1:
            return

        # repeated same pattern
        parents = {
            n.parent.wrapped_function.id
            for n in itertools.chain(existing_nodes, (self.current_node,))
            if n.parent is not None
        }
        if len(parents) == len(existing_nodes):
            return

        self.warned_functions.add(function_id)
        warnings.warn(
            "Unable to hit fast path of CUDAGraphs because of pending, uninvoked backwards. "
            "Consider running with torch.no_grad() or using torch._inductor.cudagraph_mark_step_begin() "
            "before each model invocation"
        )
    def dealloc_current_path_weakrefs(self):
        """Free all live storages recorded along the current path, first poisoning
        their tensors with an informative access-error message."""
        # TODO: we could also allow the these weak refs to continue to be allocated,
        # but that adds some complications.
        for node in self.current_node._path_from_root:
            assert len(node.tensor_weakrefs) == len(node.stack_traces)
            for t, stack_trace in zip(node.tensor_weakrefs, node.stack_traces):
                ten = None if t is None else t()
                if ten is None:
                    continue

                stack_trace = (
                    stack_trace.strip()
                    if stack_trace
                    else "[Could not find stack trace]"
                )
                msg = (
                    "Error: accessing tensor output of CUDAGraphs that has been overwritten by a subsequent run. "
                    f"Stack trace: {stack_trace}. "
                    "To prevent overwriting, clone the tensor outside of torch.compile() "
                    "before running the model again."
                )
                torch._C._set_storage_access_error_msg(ten, msg)

        # free each data pointer exactly once even if multiple weakrefs alias it
        deleted = set()
        for storage_ref in self.current_node.path_live_weakrefs():
            if storage_ref() and storage_ref.data_ptr() not in deleted:
                deleted.add(storage_ref.data_ptr())
                torch._C._free_And_Remove_DeleterFn(storage_ref())
    def clear_current_path_state_and_set_to_none(self):
        # Reset per-path bookkeeping on the node, then detach from the path
        # (the current_node setter also resets path_state to NONE).
        self.current_node.clear_path_state()
        self.current_node = None
    def apply_checkpoint_execution_state_in_allocator(self):
        """
        Checkpoint the current execution state in the caching allocator so that
        additional cudagraph recordings can be made respecting existent live storages.
        """
        self.debug_checkpointing_counter += 1
        log.debug(
            "Checkpointing cuda caching allocator state. Number of checkpoints %d",
            self.debug_checkpointing_counter,
        )

        state = self.current_node.checkpointed_caching_state
        device = self.current_node.device
        assert state is not None and device is not None

        # currently we deallocate on instead of allowing stale recordings
        stale_storages: List[int] = []

        # remove cached tensors, otherwise they would prevent memory from being
        # reclaimed in subsequent recordings
        self.current_node.remove_path_cached_tensors()
        live_storages_wrappers = list(self.current_node.path_live_weakrefs())

        live_storages_weak_refs = [t() for t in live_storages_wrappers]
        ptrs_to_deallocate = self.current_node.data_ptrs_dead_since_invocation()
        torch._C._cuda_setCheckpointPoolState(
            device, state, stale_storages, live_storages_weak_refs
        )

        # NB: deduplicate aliased outputs
        for ptr in set(ptrs_to_deallocate):
            torch._C._cuda_cudaCachingAllocator_raw_delete(ptr)

        # Now the live blocks should be exactly equal to the live storages in private pool
        if config.triton.slow_path_cudagraph_asserts:
            check_memory_pool(
                self.device_index, self.cuda_graphs_thread_pool, live_storages_wrappers
            )
            for wrapper in live_storages_wrappers:
                assert wrapper()
                assert torch._C._has_Standard_Deleter(wrapper())
                assert wrapper.data_ptr() not in ptrs_to_deallocate
def live_cudagraph_pool_storages_in_curr_execution(
|
| 2132 |
+
self,
|
| 2133 |
+
) -> List[StorageWeakRefPointer]:
|
| 2134 |
+
if self.current_node is None:
|
| 2135 |
+
return []
|
| 2136 |
+
# explicitly ignoring previous recorded outputs from past path
|
| 2137 |
+
return [t() for t in self.current_node.path_live_weakrefs()]
|
llava_next/lib/python3.10/site-packages/torch/_inductor/debug.py
ADDED
|
@@ -0,0 +1,471 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import collections
|
| 2 |
+
import contextlib
|
| 3 |
+
import cProfile
|
| 4 |
+
import dataclasses
|
| 5 |
+
import functools
|
| 6 |
+
import itertools
|
| 7 |
+
import logging
|
| 8 |
+
import os
|
| 9 |
+
import os.path
|
| 10 |
+
import pickle
|
| 11 |
+
import pstats
|
| 12 |
+
import shutil
|
| 13 |
+
import subprocess
|
| 14 |
+
from typing import Any, List
|
| 15 |
+
from unittest.mock import patch
|
| 16 |
+
|
| 17 |
+
from functorch.compile import draw_graph, get_aot_graph_name, get_graph_being_compiled
|
| 18 |
+
|
| 19 |
+
import torch
|
| 20 |
+
from torch import fx as fx
|
| 21 |
+
|
| 22 |
+
from torch._dynamo.repro.after_aot import save_graph_repro, wrap_compiler_debug
|
| 23 |
+
from torch._dynamo.utils import get_debug_dir
|
| 24 |
+
from torch.fx.graph_module import GraphModule
|
| 25 |
+
from torch.fx.passes.shape_prop import _extract_tensor_metadata, TensorMetadata
|
| 26 |
+
from torch.fx.passes.tools_common import legalize_graph
|
| 27 |
+
from torch.utils._pytree import tree_map
|
| 28 |
+
|
| 29 |
+
from . import config, ir # noqa: F811, this is needed
|
| 30 |
+
from .scheduler import (
|
| 31 |
+
BaseSchedulerNode,
|
| 32 |
+
FusedSchedulerNode,
|
| 33 |
+
NopKernelSchedulerNode,
|
| 34 |
+
OutputNode,
|
| 35 |
+
SchedulerNode,
|
| 36 |
+
)
|
| 37 |
+
from .virtualized import V
|
| 38 |
+
|
| 39 |
+
log = logging.getLogger(__name__)
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
@functools.lru_cache(None)
def has_dot() -> bool:
    """Return True if the graphviz `dot` executable is on PATH (result cached)."""
    # shutil.which avoids spawning a subprocess and, unlike shelling out to
    # `which`, also works on platforms (e.g. Windows) where `which` is absent.
    return shutil.which("dot") is not None
def draw_buffers(nodes, print_graph=False, fname=None):
    """
    Draw a graph in fname.svg.
    nodes is a list of SchedulerNode objects.
    """
    if not has_dot():
        log.warning("draw_buffers() requires `graphviz` package")
        return

    if fname is None:
        fname = get_graph_being_compiled()

    graph = create_fx_from_snodes(nodes)

    # attach TensorMetadata so the drawing includes group/dtype information
    for node in graph.nodes:
        if "fusion_meta" not in node.meta:
            continue
        group = node.meta["fusion_meta"].group
        if isinstance(group, tuple):
            group = group[1]

        # gather meta data
        dtype = None
        # NOTE(review): `node` is an fx.Node here, so this isinstance against an
        # inductor IR buffer looks like it can never be True — confirm whether
        # the underlying scheduler node's buffer was intended instead.
        if isinstance(node, ir.ComputedBuffer):
            dtype = node.data.dtype

        metadata = TensorMetadata(group, dtype, None, None, None, None, None)
        node.meta["tensor_meta"] = metadata

    if print_graph:
        print(graph)

    gm = GraphModule({}, graph)
    legalize_graph(gm)
    gm.graph.lint()
    draw_graph(gm, fname, clear_meta=False)
def create_fx_from_snodes(snodes: List[BaseSchedulerNode]) -> fx.Graph:
    """
    Creates a FX Graph from a list of SchedulerNode objects.

    Each scheduler node becomes a call_function node wrapping a fake callable
    whose name encodes the node type and fused kernel name; read dependencies
    become edges (or placeholders when the producer is not among `snodes`).
    """

    def get_fake_func(name):
        # stand-in callable — only its __name__ matters for drawing
        def func1(*args):
            return 0

        func1.__name__ = name
        return func1

    FusionMeta = collections.namedtuple("FusionMeta", ["group", "snode", "type"])

    buf_to_fx_node = {}
    graph = torch.fx.Graph()
    first_node = None

    outputs = []
    group: Any = None
    # create call_function node for each Buffer and Kernel
    for snode in snodes:
        if snode.is_extern():
            node_type = "extern"
            group = node_type
        elif snode.is_template():
            node_type = "template"
            group = node_type
        elif isinstance(snode, NopKernelSchedulerNode):
            node_type = "nop"
            group = node_type
        elif isinstance(snode, SchedulerNode):
            node_type = "compute"
            group = snode.group
        elif isinstance(snode, FusedSchedulerNode):
            node_type = "fused"
            group = snode.group
        else:
            raise RuntimeError("Unknown node type")

        fused_name = torch._inductor.utils.get_fused_kernel_name(
            snode.get_nodes(), "original_aten"
        )
        func_name = f"{node_type}: {fused_name}"
        node_func = get_fake_func(func_name)
        fx_node = graph.call_function(node_func, args=(), kwargs=None)

        def in_output(snode):
            # a fused node is an output if any of its constituents is
            if isinstance(snode, FusedSchedulerNode):
                return any(in_output(x) for x in snode.snodes)
            return any(isinstance(user.node, OutputNode) for user in snode.users)

        if in_output(snode):
            outputs.append(fx_node)
        name = snode.get_name()
        fx_node.name = name

        fx_node.meta["fusion_meta"] = FusionMeta(group, snode, node_type)

        # constituent buffers of a fused node all map to the fused fx node
        if isinstance(snode, FusedSchedulerNode):
            for x in snode.snodes:
                buf_to_fx_node[x.get_name()] = fx_node
        buf_to_fx_node[name] = fx_node

        if first_node is None:
            first_node = fx_node

    # create edges between nodes
    for snode in snodes:
        name = snode.get_name()
        deps = snode.read_writes.reads

        fx_node = buf_to_fx_node[name]
        new_args = []
        for dep in deps:
            if dep.name in buf_to_fx_node:
                dep_node = buf_to_fx_node[dep.name]
            else:
                # producer not among snodes: model it as a graph input
                with graph.inserting_before(first_node):
                    dep_node = graph.placeholder(dep.name)
                    buf_to_fx_node[dep.name] = dep_node
            new_args.append(dep_node)

        fx_node.args = tuple(new_args)

    graph.output(outputs[0] if len(outputs) == 1 else tuple(outputs))
    return graph
@contextlib.contextmanager
def enable_aot_logging():
    """Context manager: when TORCH_COMPILE_DEBUG=1, attach a per-graph DEBUG
    file handler to the aot_autograd logger (and enable partitioner debug);
    otherwise a plain pass-through."""
    compile_debug = os.environ.get("TORCH_COMPILE_DEBUG", "0") == "1"

    import torch._functorch.aot_autograd

    log = logging.getLogger(torch._functorch.aot_autograd.__name__)

    stack = contextlib.ExitStack()
    if not compile_debug:
        try:
            yield
        finally:
            stack.close()
        return

    # Enable all graphs to be logged to a file by setting the flags to True
    # and the log level of the file logger to DEBUG
    stack.enter_context(patch("functorch.compile.config.debug_partitioner", True))

    path = os.path.join(get_debug_dir(), "torchinductor")
    if not os.path.exists(path):
        os.makedirs(path)

    fh = logging.FileHandler(
        os.path.join(
            path,
            f"aot_{get_aot_graph_name()}_debug.log",
        )
    )
    fh.setLevel(logging.DEBUG)
    fh.setFormatter(
        logging.Formatter("[%(filename)s:%(lineno)d %(levelname)s] %(message)s")
    )
    log.addHandler(fh)
    try:
        yield
    finally:
        # always detach the handler so repeated compilations don't stack them
        log.removeHandler(fh)
        stack.close()
class DebugContext:
    """Per-compilation debug scaffold: creates a debug output directory,
    captures inductor logs and optional cProfile data while active, and
    reports/uploads the collected artifacts on exit."""

    # globally increasing suffix so each compilation gets a fresh directory
    _counter = itertools.count()

    @staticmethod
    def wrap(fn):
        """Decorate a compiler entry point so it runs inside a DebugContext."""

        @functools.wraps(fn)
        def inner(*args, **kwargs):
            with DebugContext():
                return fn(*args, **kwargs)

        return wrap_compiler_debug(inner, compiler_name="inductor")

    @staticmethod
    def create_debug_dir(folder_name):
        # Probe increasing suffixes until an unused directory name is found.
        for n in DebugContext._counter:
            dirname = os.path.join(
                get_debug_dir(),
                "torchinductor",
                f"{folder_name}.{n}",
            )
            if not os.path.exists(dirname):
                os.makedirs(dirname)
                return dirname

    def __init__(self):
        self._prof = None  # cProfile.Profile, set in __enter__ when profiling
        self._path = None  # debug output directory; None while tracing disabled
        self._stack = contextlib.ExitStack()

    def copy(self, new_path: str):
        """Copy the collected debug directory to `new_path` (must end in .debug)."""
        if not self._path:
            return
        assert new_path.endswith(".debug"), new_path
        if os.path.exists(new_path):
            shutil.rmtree(new_path)
        try:
            shutil.copytree(self._path, new_path)
            self._path = new_path
        except OSError:
            # best effort: a failed copy should not break compilation
            log.warning(
                "Failed to copy debug files from %s to %s", self._path, new_path
            )
            pass

    def fopen(self, filename):
        # Open a file inside the debug directory for writing.
        assert self._path
        return open(os.path.join(self._path, filename), "w")

    def filename(self, suffix):
        # Absolute path of a file inside the debug directory.
        return os.path.join(self._path, suffix)

    def upload_tar(self):
        # Hand a .tar.gz of the debug dir to the user-configured upload hook.
        if config.trace.upload_tar is not None:
            import tarfile

            assert self._path
            tar_file = os.path.join(
                self._path, f"{os.path.basename(self._path)}.tar.gz"
            )
            with tarfile.open(tar_file, "w:gz") as tar:
                tar.add(self._path, arcname=os.path.basename(self._path))
            config.trace.upload_tar(tar_file)

    def __enter__(self):
        if config.debug:
            # temporarily raise torch._dynamo logging to DEBUG for this compile
            log = logging.getLogger("torch._dynamo")
            prev_level = log.level
            log.setLevel(logging.DEBUG)

            def reset_log_level(level):
                log.setLevel(level)

            self._stack.callback(reset_log_level, prev_level)

        self._stack.enter_context(V.set_debug_handler(self))

        if not config.trace.enabled:
            return

        self._path = self.create_debug_dir(get_aot_graph_name())

        if config.trace.debug_log:
            self._setup_log_capture("debug.log", logging.DEBUG)
        if config.trace.info_log:
            self._setup_log_capture("info.log", logging.INFO)
        if config.trace.compile_profile:
            self._prof = cProfile.Profile()
            self._prof.enable()

    def _setup_log_capture(self, filename, level):
        # Tee torch._inductor logs at `level` into a file in the debug dir;
        # teardown is registered on the exit stack.
        log = logging.getLogger("torch._inductor")
        fd = self._stack.enter_context(self.fopen(filename))
        ch = logging.StreamHandler(fd)
        ch.setLevel(level)
        ch.setFormatter(
            logging.Formatter("[%(filename)s:%(lineno)d %(levelname)s] %(message)s")
        )
        log.addHandler(ch)
        log.setLevel(min(log.level, level))
        self._stack.callback(log.removeHandler, ch)

    def __exit__(self, exc_type, exc_val, exc_tb):
        if self._prof:
            self._prof.disable()
            self._save_profile_data()

        if self._path:
            self.upload_tar()
            log.warning("%s debug trace: %s", get_graph_being_compiled(), self._path)
        self._stack.close()

    def _save_profile_data(self):
        # Dump raw profile plus human-readable top-100 by cumtime and tottime.
        self._prof.dump_stats(self.filename("compile.prof"))
        with self.fopen("compile.stats") as fd:
            stats = pstats.Stats(self._prof, stream=fd)
            stats.strip_dirs()
            stats.sort_stats("cumtime")
            stats.print_stats(100)
            stats.sort_stats("tottime")
            stats.print_stats(100)

    def __getattr__(self, name):
        # Trace hooks (fx_graph, output_code, ...) resolve to the matching
        # DebugFormatter method when that trace flag is on; otherwise every
        # unknown attribute is a no-op callable.
        if config.trace.enabled and getattr(config.trace, name):
            try:
                return getattr(DebugFormatter(self), name)
            except Exception:
                log.warning("Ignoring exception in debug code", exc_info=True)
        else:

            def ignored(*args, **kwargs):
                pass

            return ignored
# Type alias: the debug hooks hand around plain lists of scheduler nodes.
SchedulerNodeList = List[Any]
class DebugFormatter:
    """Writes the individual debug artifacts (fx graphs, IR dumps, diagrams,
    output code) into the directory managed by a DebugContext handler."""

    def __init__(self, handler):
        self.fopen = handler.fopen
        self.filename = handler.filename
        self.handler = handler

    def fx_graph(self, gm: torch.fx.GraphModule, inputs: List[torch.Tensor]):
        """Save both a runnable repro and a readable rendering of the fx graph."""
        with self.fopen("fx_graph_runnable.py") as out:
            save_graph_repro(out, gm, inputs, "inductor")

        with self.fopen("fx_graph_readable.py") as out:
            out.write(gm.print_readable(print_output=False))

    def fx_graph_transformed(
        self, gm: torch.fx.GraphModule, inputs: List[torch.Tensor]
    ):
        """Save the post-transformation fx graph in readable form."""
        with self.fopen("fx_graph_transformed.py") as out:
            out.write(gm.print_readable(print_output=False))

    def ir_pre_fusion(self, nodes: SchedulerNodeList):
        self._write_ir("ir_pre_fusion.txt", nodes)

    def ir_post_fusion(self, nodes: SchedulerNodeList):
        self._write_ir("ir_post_fusion.txt", nodes)

    def _write_ir(self, filename: str, nodes: SchedulerNodeList):
        # One debug_str() per node, separated by blank lines.
        with self.fopen(filename) as out:
            for snode in nodes:
                out.write(snode.debug_str())
                out.write("\n\n\n")

    def graph_diagram(self, nodes: SchedulerNodeList):
        draw_buffers(nodes, fname=self.filename("graph_diagram.svg"))

    def output_code(self, filename):
        shutil.copy(filename, self.filename("output_code.py"))
@dataclasses.dataclass
class TensorMetadataHolder:
    """Picklable stand-in for a Tensor: its metadata plus its device.

    Used by save_args_for_compile_fx_inner because FakeTensor cannot be
    pickled (see handle_tensor there).
    """

    tensor_metadata: TensorMetadata
    device: torch.device
|
| 400 |
+
|
| 401 |
+
|
| 402 |
+
# Monotonic suffix so each saved compile_fx_inner call gets a unique file.
save_args_cnt = itertools.count()
|
| 403 |
+
|
| 404 |
+
|
| 405 |
+
def save_args_for_compile_fx_inner(*args, **kwargs):
    """
    This function is used to save arguments for a compile_fx_inner function call
    to the file system. Later on one can replay the compile_fx_inner call
    with the saved arguments using load_args_and_run_compile_fx_inner.
    """

    folder = "/tmp/inductor_saved_args"
    # makedirs(exist_ok=True) avoids the check-then-create race of the
    # previous os.path.exists + os.mkdir pair when several compile workers
    # reach this point concurrently.
    os.makedirs(folder, exist_ok=True)

    def handle_tensor(x):
        """
        Pickle FakeTensor will result in error:
        AttributeError: Can't pickle local object 'WeakValueDictionary.__init__.<locals>.remove'

        Convert all Tensor to metadata. This may also makes pickle faster.
        """
        if isinstance(x, torch.Tensor):
            return TensorMetadataHolder(_extract_tensor_metadata(x), x.device)
        else:
            return x

    args_to_save, kwargs_to_save = tree_map(handle_tensor, (args, kwargs))

    fn_name = "compile_fx_inner"
    path = f"{folder}/{fn_name}_{next(save_args_cnt)}.pkl"
    with open(path, "wb") as f:
        pickle.dump((args_to_save, kwargs_to_save), f)

    if log.isEnabledFor(logging.DEBUG):
        message = f"""
Arguments for a compile_fx_inner call is saved to {path}. To replay the call,
run the following:

from torch._inductor.debug import load_args_and_run_compile_fx_inner
load_args_and_run_compile_fx_inner({path!r})
"""
        # call print rather than log.debug. log.debug will print message
        # prefix for each line which makes the code snippet harder to be
        # copied.
        # Not a big deal since the code is already been guarded by checking
        # the log level.
        print(message)
|
| 449 |
+
|
| 450 |
+
|
| 451 |
+
def load_args_and_run_compile_fx_inner(path):
    """Replay a compile_fx_inner call captured by save_args_for_compile_fx_inner.

    Tensor-metadata placeholders are rehydrated as random strided tensors
    under a FakeTensorMode before the call.  NOTE(review): this unpickles
    *path* — only point it at files you created yourself.
    """
    from torch._inductor.compile_fx import compile_fx_inner

    with open(path, "rb") as f:
        args, kwargs = pickle.load(f)

    def handle_tensor(x):
        if not isinstance(x, TensorMetadataHolder):
            return x
        meta = x.tensor_metadata
        return torch._dynamo.testing.rand_strided(
            meta.shape, meta.stride, meta.dtype, x.device
        )

    fake_mode = torch._subclasses.FakeTensorMode(allow_non_fake_inputs=True)
    # save_args is patched off so the replayed call does not re-save itself.
    with fake_mode, config.patch("save_args", False):
        args, kwargs = tree_map(handle_tensor, (args, kwargs))
        return compile_fx_inner(*args, **kwargs)
|
llava_next/lib/python3.10/site-packages/torch/_inductor/decomposition.py
ADDED
|
@@ -0,0 +1,500 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import functools
|
| 2 |
+
import logging
|
| 3 |
+
import math
|
| 4 |
+
import numbers
|
| 5 |
+
import typing
|
| 6 |
+
|
| 7 |
+
import torch
|
| 8 |
+
import torch._decomp as decomp
|
| 9 |
+
import torch.ao.quantization.fx._decomposed
|
| 10 |
+
from torch._decomp import (
|
| 11 |
+
core_aten_decompositions,
|
| 12 |
+
get_decompositions,
|
| 13 |
+
remove_decompositions,
|
| 14 |
+
)
|
| 15 |
+
from torch._decomp.decompositions import pw_cast_for_opmath
|
| 16 |
+
from torch._decomp.decompositions_for_rng import extra_random_decomps
|
| 17 |
+
|
| 18 |
+
from . import config
|
| 19 |
+
|
| 20 |
+
log = logging.getLogger(__name__)
# Shorthands for the operator namespaces used throughout this module.
aten = torch.ops.aten
prims = torch.ops.prims
quantized_decomposed = torch.ops.quantized_decomposed
|
| 24 |
+
|
| 25 |
+
# Decompositions Inductor wants in addition to the core ATen set (merged
# into `decompositions` below).
inductor_decompositions = get_decompositions(
    [
        aten._adaptive_avg_pool2d_backward,
        aten.arange,
        aten.bitwise_and_,
        aten.bitwise_or_,
        aten.clamp_min_,
        aten.dist,
        aten.empty_like,
        aten.flip,
        aten.gelu,
        aten.hardtanh,
        aten.index_select,
        aten.lcm,
        aten.leaky_relu,
        aten.linalg_vector_norm,
        aten._log_softmax,
        aten.max_pool2d_with_indices_backward,
        aten._native_batch_norm_legit,
        aten._native_batch_norm_legit_functional,
        aten._native_batch_norm_legit_no_training,
        aten.native_batch_norm,
        aten.native_group_norm,
        aten.native_layer_norm,
        aten._softmax,
        aten.sin_,
        aten.sqrt_,
        aten.std,
        aten.std_mean,
        aten._to_copy,
        aten.tril_indices,
        aten.triu_indices,
        aten.unsafe_split,
        aten.upsample_bilinear2d.vec,
    ]
)
|
| 61 |
+
# Full decomp table: core ATen decompositions overlaid with Inductor's own
# (Inductor entries win on conflict).
decompositions = {**core_aten_decompositions(), **inductor_decompositions}

# Remove unwanted decompositions included via the core ATen decompositions from
# the Inductor decomp table.
decomps_to_exclude = [
    aten._unsafe_index,
]

remove_decompositions(decompositions, decomps_to_exclude)
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
def register_decomposition(ops):
    """Register a decomposition into Inductor's ``decompositions`` table.

    *ops* may be a single op/overload or a list of them.  A warning is
    emitted for each op that already has an entry (the new one wins).
    """
    for op in [ops] if callable(ops) else ops:
        if op in decompositions:
            # Name the specific duplicated op, not the whole argument list.
            log.warning("duplicate decomp: %s", op)
    return decomp.register_decomposition(ops, decompositions)
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
@register_decomposition(aten._unsafe_view.default)
def _unsafe_view(self, size):
    """Lower ``_unsafe_view`` to a plain ``view`` so pattern matching sees
    one canonical op."""
    result = self.view(size)
    return result
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
# TODO: for now, inductor doesn't handle asserts
# because the condition is symbool -> tensor in the graph.
@register_decomposition([aten._assert_async.msg])
def assert_async_msg_decomp(tensor, msg):
    """Drop the runtime assert entirely (see TODO above)."""
    return None
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
# Following `assert_async_msg_decomp` and implement as non-op.
@register_decomposition([aten._functional_assert_async.msg])
def functional_assert_async_msg_decomp(tensor, msg):
    """Functional variant of the assert; also dropped."""
    return None
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
@register_decomposition([aten.sym_constrain_range_for_size.default])
def sym_constrain_range_for_size(symbol, *, min=None, max=None):
    """Range constraints carry no runtime behavior for Inductor; no-op."""
    return None
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
@register_decomposition([aten.clamp])
@pw_cast_for_opmath
def clamp(x, min=None, max=None):
    """Decompose clamp into clamp_min / clamp_max, skipping absent bounds."""
    out = x
    if min is not None:
        out = out.clamp_min(min)
    if max is not None:
        out = out.clamp_max(max)
    return out
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
# TorchInductor-only decomposition. It should not be taken to core.
# See https://github.com/pytorch/torchdynamo/pull/1120
@register_decomposition([aten.floor_divide.default])
def floordiv(a, b):
    """Express floor_divide as div with floor rounding mode."""
    return aten.div.Tensor_mode(a, b, rounding_mode="floor")
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
# Not really sure how to put this into the main library. PrimTorch wants
# empty_permuted to go to the prim, and typically users don't really want
# to decompose to empty_strided (but inductor is OK with it, because we are
# cool with strides and everything goes to empty_strided)
@register_decomposition([aten.empty_permuted.default])
def empty_permuted(size, physical_layout, **kwargs):
    """Allocate in physical order, then permute back to the logical order."""
    inverse = [0] * len(size)
    for physical_pos, logical_dim in enumerate(physical_layout):
        inverse[logical_dim] = physical_pos
    physical_shape = [size[dim] for dim in physical_layout]
    return torch.empty(physical_shape, **kwargs).permute(inverse)
|
| 130 |
+
|
| 131 |
+
|
| 132 |
+
@register_decomposition([aten.convolution_backward])
def convolution_backward(
    grad_output,
    input,
    weight,
    bias_sizes,
    stride,
    padding,
    dilation,
    transposed,
    output_padding,
    groups,
    output_mask,
):
    """Peel the bias gradient off CUDA convolution backward.

    The bias grad is a sum over all non-channel dims (fusible by Inductor);
    the input/weight grads are redispatched to the ATen kernel with the bias
    entry of *output_mask* cleared.  ``NotImplemented`` keeps the original
    op for every other case.
    """
    needs_bias_grad = output_mask[2]
    if not needs_bias_grad or grad_output.device.type != "cuda":
        return NotImplemented
    reduce_dims = [0] + list(range(2, grad_output.dim()))
    grad_bias = aten.sum(grad_output, reduce_dims)
    grad_inp, grad_weight, _ = aten.convolution_backward(
        grad_output,
        input,
        weight,
        bias_sizes,
        stride,
        padding,
        dilation,
        transposed,
        output_padding,
        groups,
        [output_mask[0], output_mask[1], False],
    )
    return (grad_inp, grad_weight, grad_bias)
|
| 163 |
+
|
| 164 |
+
|
| 165 |
+
@register_decomposition([aten.log2])
def log2(x):
    """log2(x) = ln(x) / ln(2), with the constant folded into a multiply."""
    inv_ln2 = 1.0 / math.log(2.0)
    return torch.log(x) * inv_ln2
|
| 168 |
+
|
| 169 |
+
|
| 170 |
+
@register_decomposition([aten.round.decimals])
def round_dec(x, decimals=0):
    """Round to *decimals* places by scaling, rounding, then unscaling."""
    scale = 10.0**decimals
    return aten.round(x * scale) * (1.0 / scale)
|
| 174 |
+
|
| 175 |
+
|
| 176 |
+
@register_decomposition([aten.all.default])
def all(input):
    # all(x) == not any(not x).  NOTE: intentionally shadows builtins.all —
    # the name must match the decomposed op.
    return torch.logical_not(torch.any(torch.logical_not(input)))
|
| 179 |
+
|
| 180 |
+
|
| 181 |
+
@register_decomposition([aten.all.dim])
def all_dim(input, dim, keepdim=False):
    """Dimension-wise ``all`` via De Morgan on ``any``."""
    negated = torch.logical_not(input)
    return torch.logical_not(torch.any(negated, dim, keepdim))
|
| 184 |
+
|
| 185 |
+
|
| 186 |
+
@register_decomposition([aten.baddbmm])
def baddbmm(self, batch1, batch2, beta=1, alpha=1):
    """beta * self + alpha * bmm(batch1, batch2), skipping no-op scalings."""
    product = torch.bmm(batch1, batch2)
    if not isinstance(alpha, numbers.Number) or alpha != 1:
        product = product * alpha
    if beta == 0:
        # beta == 0 drops `self` entirely without touching its values.
        return product
    bias = self
    if not isinstance(beta, numbers.Number) or beta != 1:
        bias = bias * beta
    return bias + product
|
| 196 |
+
|
| 197 |
+
|
| 198 |
+
@register_decomposition([aten.bmm])
def bmm(self, batch2):
    """Decompose degenerate CPU batched matmuls (inner dims of size 1) into
    a fusible mul + sum; everything else keeps the ATen kernel.

    Fix: ``self.device`` is a ``torch.device``, so comparing it to the plain
    string "cpu" is unreliable and the CPU path could never trigger; use
    ``.type`` like the check in ``convolution_backward`` does.
    """
    if self.device.type == "cpu":
        if self.size(1) == 1 and batch2.size(-1) == 1:
            return torch.sum(
                self.squeeze(1) * batch2.squeeze(-1), dim=1, keepdim=True
            ).unsqueeze(1)
    return NotImplemented
|
| 206 |
+
|
| 207 |
+
|
| 208 |
+
@register_decomposition([aten.mm])
def mm(self, input2):
    """Decompose tiny/degenerate CPU matmuls into fusible pointwise ops.

    Fix: ``self.device`` is a ``torch.device``, so comparing it to the plain
    string "cpu" is unreliable and the CPU path could never trigger; use
    ``.type`` like the check in ``convolution_backward`` does.
    """
    if self.device.type == "cpu":
        # Tiny (m, 1) @ (1, n) outer product: per-row scalar multiplies.
        if (
            self.size(-1) == 1
            and input2.size(0) == 1
            and (self.dtype == input2.dtype)
            and ((torch.numel(self) + torch.numel(input2)) <= 32)
        ):
            return torch.cat([self[i, :] * input2 for i in range(self.size(0))])
        # (1, k) @ (k, 1): a dot product expressed as mul + sum.
        if self.size(0) == 1 and input2.size(-1) == 1:
            return torch.sum(
                self.squeeze(0) * input2.squeeze(-1), dim=0, keepdim=True
            ).unsqueeze(0)
    return NotImplemented
|
| 223 |
+
|
| 224 |
+
|
| 225 |
+
@register_decomposition([aten.cat.default])
def cat(tensors, dim=0):
    """Drop empty 1-D inputs before concatenating.

    Fix: when exactly one non-empty tensor survives the filter, return a
    clone of THAT tensor (``filtered_tensors[0]``) — the original returned
    ``tensors[0]``, which is wrong whenever the first input was one of the
    dropped empties.
    """

    def non_empty_tensor(x):
        # special case for cat'ing with an empty tensor -
        # just drop the 'empty' inputs so they don't confuse the logic below.
        return len(x.shape) > 1 or x.shape[0] > 0

    filtered_tensors = list(filter(non_empty_tensor, tensors))

    if len(filtered_tensors) == 1:
        return filtered_tensors[0].clone()
    elif 1 < len(filtered_tensors) < len(tensors):
        # on the first call, when we remove empty tensors, we redispatch recursively
        return aten.cat.default(filtered_tensors, dim)
    # when no 'filtering' has occured, we raise to prevent infinite recursion (no more decomposition needed)
    return NotImplemented
|
| 241 |
+
|
| 242 |
+
|
| 243 |
+
@register_decomposition([aten.angle])
def angle(x):
    """Argument of *x*: atan2(imag, real) for complex inputs (NaN real part
    propagates NaN); for real inputs 0 when x >= 0, pi when x < 0, NaN for
    NaN."""
    if x.is_complex():
        return torch.where(
            torch.isnan(x.real), float("nan"), torch.atan2(x.imag, x.real)
        )
    # Real path: pi where negative, then poison NaN positions via addition.
    base = torch.where(x < 0, math.pi, 0.0)
    nan_mask = torch.where(torch.isnan(x), float("nan"), 0.0)
    return base + nan_mask
|
| 257 |
+
|
| 258 |
+
|
| 259 |
+
@register_decomposition([aten.conj_physical])
def conj_physical(self):
    """Conjugation is the identity for real tensors; complex is unimplemented."""
    assert not self.is_complex(), "TODO: implement this"
    return self
|
| 263 |
+
|
| 264 |
+
|
| 265 |
+
@register_decomposition([aten.lift, aten.detach_])
def lift(self):
    """Both ops are identities from Inductor's point of view."""
    return self
|
| 268 |
+
|
| 269 |
+
|
| 270 |
+
@register_decomposition([aten.bernoulli.default])
def bernoulli(self, *, generator=None):
    """Sample Bernoulli(p=self) by thresholding float32 uniform noise."""
    assert generator is None
    noise = torch.rand_like(self, dtype=torch.float32)
    return noise < self
|
| 274 |
+
|
| 275 |
+
|
| 276 |
+
@register_decomposition([aten.fmin, prims.fmin])
def fmin(self, other):
    # NaN-ignoring minimum: keep self when other is NaN or strictly larger.
    return torch.where(torch.isnan(other) | (other > self), self, other)
|
| 279 |
+
|
| 280 |
+
|
| 281 |
+
@register_decomposition([aten.fmax, prims.fmax])
def fmax(self, other):
    # NaN-ignoring maximum: keep self when other is NaN or strictly smaller.
    return torch.where(torch.isnan(other) | (other < self), self, other)
|
| 284 |
+
|
| 285 |
+
|
| 286 |
+
@register_decomposition([aten.narrow_copy])
def narrow_copy(self, dim, start, length):
    """narrow + clone: materialize the slice instead of viewing it."""
    view = torch.narrow(self, dim, start, length)
    return view.clone()
|
| 289 |
+
|
| 290 |
+
|
| 291 |
+
@register_decomposition([aten.expand_copy])
def expand_copy(self, size, *, implicit=False):
    """expand + clone so the result owns its memory."""
    expanded = aten.expand(self, size, implicit=implicit)
    return expanded.clone()
|
| 294 |
+
|
| 295 |
+
|
| 296 |
+
@register_decomposition([aten.view_copy.default])
def view_copy_default(self, size):
    """view + clone so the result owns its memory."""
    viewed = aten.view(self, size)
    return viewed.clone()
|
| 299 |
+
|
| 300 |
+
|
| 301 |
+
@register_decomposition([aten.view_copy.dtype])
def view_copy_dtype(self, dtype):
    """Decompose the dtype overload of view_copy via ``to`` + ``clone``."""
    converted = self.to(dtype)
    return converted.clone()
|
| 304 |
+
|
| 305 |
+
|
| 306 |
+
@register_decomposition(aten.rand_like)
def rand_like(self, *, dtype=None, device=None, **kwargs):
    """Lower rand_like to rand with shape/dtype/device taken from *self*."""
    return torch.rand(
        list(self.size()),
        dtype=dtype or self.dtype,
        device=device or self.device,
        **kwargs,
    )
|
| 314 |
+
|
| 315 |
+
|
| 316 |
+
@register_decomposition(aten.randn_like)
def randn_like(self, *, dtype=None, device=None, **kwargs):
    """Lower randn_like to randn with shape/dtype/device taken from *self*."""
    return torch.randn(
        list(self.size()),
        dtype=dtype or self.dtype,
        device=device or self.device,
        **kwargs,
    )
|
| 324 |
+
|
| 325 |
+
|
| 326 |
+
@register_decomposition(aten.full_like)
def full_like(
    self,
    fill_value,
    *,
    dtype=None,
    layout=None,
    device=None,
    pin_memory=False,
    requires_grad=False,
    memory_format=torch.preserve_format,
):
    """Lower full_like to full, defaulting dtype/layout/device from *self*.

    NOTE(review): ``pin_memory`` and ``memory_format`` are accepted but not
    forwarded — confirm this is intentional for Inductor's lowering.
    """
    return torch.full(
        list(self.size()),
        fill_value,
        dtype=dtype or self.dtype,
        layout=layout or self.layout,
        device=device or self.device,
        requires_grad=requires_grad,
    )
|
| 346 |
+
|
| 347 |
+
|
| 348 |
+
@register_decomposition(aten.randint_like.default)
def randint_like(self, high, *, dtype=None, device=None, **kwargs):
    """randint_like -> randint over [0, high) with metadata from *self*."""
    return aten.randint.low(
        0,
        high,
        list(self.size()),
        dtype=dtype or self.dtype,
        device=device or self.device,
        **kwargs,
    )
|
| 358 |
+
|
| 359 |
+
|
| 360 |
+
@register_decomposition(aten.randint_like.low_dtype)
def randint_like_low(self, low, high, *, dtype=None, device=None, **kwargs):
    """randint_like with explicit low -> randint over [low, high)."""
    return aten.randint.low(
        low,
        high,
        list(self.size()),
        dtype=dtype or self.dtype,
        device=device or self.device,
        **kwargs,
    )
|
| 370 |
+
|
| 371 |
+
|
| 372 |
+
@register_decomposition(aten.randint.default)
def randint(high, size, **kwargs):
    """Canonicalize randint(high, ...) onto the explicit-low overload."""
    return aten.randint.low(0, high, size, **kwargs)
|
| 375 |
+
|
| 376 |
+
|
| 377 |
+
# The difference between quantize_per_tensor.default and quantize_per_tensor.tensor is
# scale and zero_point is scalar or scalar tensor
@register_decomposition(quantized_decomposed.quantize_per_tensor.default)
def quantize_per_tensor_default_decomp_impl(
    input: torch.Tensor,
    scale: float,
    zero_point: int,
    quant_min: int,
    quant_max: int,
    dtype: torch.dtype,
) -> torch.Tensor:
    """Affine-quantize: round(input / scale) + zero_point, clamped, cast."""
    scaled = torch.round(input * (1.0 / scale)) + zero_point
    return torch.clamp(scaled, quant_min, quant_max).to(dtype)
|
| 392 |
+
|
| 393 |
+
|
| 394 |
+
# The difference between dequantize_per_tensor.default and dequantize_per_tensor.tensor is
# scale and zero_point is scalar or scalar tensor
@register_decomposition(quantized_decomposed.dequantize_per_tensor.default)
def dequantize_per_tensor_default_decomp_impl(
    input: torch.Tensor,
    scale: float,
    zero_point: int,
    quant_min: int,
    quant_max: int,
    dtype: torch.dtype,
) -> torch.Tensor:
    """Affine-dequantize: (input - zero_point) * scale, computed in float32."""
    centered = input.to(torch.float32) - zero_point
    return centered * scale
|
| 406 |
+
|
| 407 |
+
|
| 408 |
+
@register_decomposition(quantized_decomposed.quantize_per_tensor.tensor)
def quantize_per_tensor_tensor_decomp_impl(
    input: torch.Tensor,
    scale: torch.Tensor,
    zero_point: torch.Tensor,
    quant_min: int,
    quant_max: int,
    dtype: torch.dtype,
) -> torch.Tensor:
    """Tensor-scale/zero-point variant of affine quantization."""
    scaled = torch.round(input * (1.0 / scale)) + zero_point
    return torch.clamp(scaled, quant_min, quant_max).to(dtype)
|
| 421 |
+
|
| 422 |
+
|
| 423 |
+
@register_decomposition(quantized_decomposed.dequantize_per_tensor.tensor)
def dequantize_per_tensor_tensor_decomp_impl(
    input: torch.Tensor,
    scale: torch.Tensor,
    zero_point: torch.Tensor,
    quant_min: int,
    quant_max: int,
    dtype: torch.dtype,
) -> torch.Tensor:
    """Tensor-scale/zero-point variant of affine dequantization."""
    centered = input.to(torch.float32) - zero_point
    return centered * scale
|
| 433 |
+
|
| 434 |
+
|
| 435 |
+
@register_decomposition(aten._foreach_addcmul.Scalar)
def _foreach_addcmul_scalar(self, left_tensors, right_tensors, scalar=1):
    """self[i] + scalar * left[i] * right[i], built from foreach mul/add."""
    products = aten._foreach_mul.List(left_tensors, right_tensors)
    return aten._foreach_add.List(self, products, alpha=scalar)
|
| 440 |
+
|
| 441 |
+
|
| 442 |
+
@register_decomposition(aten._foreach_addcdiv.Scalar)
def _foreach_addcdiv_scalar(self, left_tensors, right_tensors, scalar=1):
    """self[i] + scalar * left[i] / right[i], built from foreach div/add."""
    quotients = aten._foreach_div.List(left_tensors, right_tensors)
    return aten._foreach_add.List(self, quotients, alpha=scalar)
|
| 447 |
+
|
| 448 |
+
|
| 449 |
+
@register_decomposition(aten._foreach_lerp.Scalar)
def _foreach_lerp_scalar(start_tensors, end_tensors, weight):
    """start[i] + weight * (end[i] - start[i]) via foreach sub/mul/add."""
    deltas = aten._foreach_sub.List(end_tensors, start_tensors)
    steps = aten._foreach_mul.Scalar(deltas, weight)
    return aten._foreach_add.List(start_tensors, steps)
|
| 457 |
+
|
| 458 |
+
|
| 459 |
+
@aten.miopen_batch_norm.default.py_impl(torch._C.DispatchKey.Autograd)
@register_decomposition(aten.miopen_batch_norm)
def miopen_batch_norm(
    input: torch.Tensor,
    weight: torch.Tensor,
    bias: typing.Optional[torch.Tensor],
    running_mean: typing.Optional[torch.Tensor],
    running_var: typing.Optional[torch.Tensor],
    training: bool,
    exponential_average_factor: float,
    epsilon: float,
):
    """Route MIOpen batch norm through native_batch_norm.

    In eval mode the second and third outputs are replaced with empty
    tensors (NOTE(review): presumably matching the MIOpen op's contract —
    confirm against the ATen schema).
    """
    out, save_mean, save_invstd = aten.native_batch_norm(
        input,
        weight,
        bias,
        running_mean,
        running_var,
        training,
        exponential_average_factor,
        epsilon,
    )

    if training:
        return (out, save_mean, save_invstd)
    return (
        out,
        weight.new_zeros((0,)),
        weight.new_zeros((0,)),
    )
|
| 489 |
+
|
| 490 |
+
|
| 491 |
+
@functools.lru_cache(None)
def fast_random_decomps():
    """Decomp table extended with ``extra_random_decomps`` (computed once)."""
    table = dict(decompositions)
    table.update(extra_random_decomps)
    return table
|
| 494 |
+
|
| 495 |
+
|
| 496 |
+
def select_decomp_table():
    """decomps can change based on config"""
    # fallback_random keeps the plain table (eager RNG); otherwise use the
    # cached table with the extra random decomps merged in.
    if config.fallback_random:
        return decompositions
    return fast_random_decomps()
|
llava_next/lib/python3.10/site-packages/torch/_inductor/dependencies.py
ADDED
|
@@ -0,0 +1,360 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import collections
|
| 2 |
+
import dataclasses
|
| 3 |
+
import itertools
|
| 4 |
+
import logging
|
| 5 |
+
import re
|
| 6 |
+
import typing
|
| 7 |
+
from typing import Callable, Dict, List, Optional, Set, Tuple, Union
|
| 8 |
+
|
| 9 |
+
import sympy
|
| 10 |
+
|
| 11 |
+
import torch
|
| 12 |
+
|
| 13 |
+
from .codegen.common import index_prevent_reordering
|
| 14 |
+
from .utils import get_dtype_size, sympy_str, sympy_subs, sympy_symbol, VarRanges
|
| 15 |
+
from .virtualized import V
|
| 16 |
+
|
| 17 |
+
log = logging.getLogger(__name__)
# Heuristic: symbol names containing "indirect" or "tmp" mark indirect
# (data-dependent) indexing.
is_indirect = re.compile(r"indirect|tmp").search
# Any dependency edge tracked by this module.
Dep = Union["MemoryDep", "StarDep", "WeakDep"]
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
class MemoryDep(typing.NamedTuple):
    """A read/write of buffer ``name`` at symbolic ``index`` over the
    iteration space described by ``var_names``/``size``."""

    name: str
    index: sympy.Expr  # type: ignore[assignment]
    var_names: Tuple[sympy.Symbol, ...]
    size: Tuple[sympy.Expr, ...]

    def __repr__(self):
        return f"MemoryDep({self.name!r}, {self.index}, {self.ranges})"

    @property
    def ranges(self) -> Dict[sympy.Symbol, sympy.Expr]:
        """{c0: 128, c1: 512, ...}"""
        return dict(zip(self.var_names, self.size))

    def rename(self, renames: Dict[str, str]) -> "MemoryDep":
        # Renames the buffer only; the index expression is kept as-is.
        if self.name in renames:
            return MemoryDep(
                renames[self.name], self.index, var_names=self.var_names, size=self.size
            )
        return self

    def numbytes_hint(self):
        # Estimated bytes touched: product of the ranges whose variables
        # actually appear in the index, times the element size.  Indirect
        # accesses fall back to the whole buffer's numel.
        if self.is_indirect():
            numel = V.graph.get_numel(self.name)
        else:
            vars = set(self.index.free_symbols)
            numel = sympy.Integer(1)
            for var, size in zip(self.var_names, self.size):
                if var in vars:
                    numel = numel * size
        return V.graph.sizevars.size_hint(numel) * get_dtype_size(
            V.graph.get_dtype(self.name)
        )

    def is_contiguous(self) -> bool:
        # Contiguous iff the index is exactly one of the iteration variables.
        return isinstance(self.index, sympy.Symbol) and self.index in self.var_names

    def is_scalar(self) -> bool:
        # Scalar: a constant index, or a lone symbol that is neither an
        # iteration variable nor an indirect temporary.
        if isinstance(self.index, sympy.Symbol):
            return self.index not in self.var_names and not self.is_indirect()
        return isinstance(self.index, (int, sympy.Integer))

    def is_indirect(self) -> bool:
        return any(is_indirect(v.name) for v in self.index.free_symbols)
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
class StarDep(typing.NamedTuple):
    """Dependency on the entirety of buffer ``name`` (no index available)."""

    # depends on the entire buffer
    name: str

    @property
    def index(self):
        raise NotImplementedError("StarDep does not have an index")

    def rename(self, renames: Dict[str, str]) -> "StarDep":
        if self.name not in renames:
            return self
        return StarDep(renames[self.name])

    def numbytes_hint(self):
        # Whole-buffer estimate: numel times element size.
        return V.graph.sizevars.size_hint(
            V.graph.get_numel(self.name)
        ) * get_dtype_size(V.graph.get_dtype(self.name))

    def is_contiguous(self) -> bool:
        return False

    def is_scalar(self) -> bool:
        return False

    def is_indirect(self) -> bool:
        return False
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
# Used for tracking mutation ordering
# if A reads a buffer and B mutates it
# B must be ordered after A
class WeakDep(typing.NamedTuple):
    """Ordering-only edge on buffer ``name``; carries no real data volume."""

    name: str

    @property
    def index(self):
        raise NotImplementedError("WeakDep does not have an index")

    def rename(self, renames: Dict[str, str]) -> "WeakDep":
        if self.name not in renames:
            return self
        return WeakDep(renames[self.name])

    def numbytes_hint(self):
        return 1  # Purely inserted for ordering, not an actual dep

    def is_contiguous(self) -> bool:
        return False
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
class IndexExprDep(typing.NamedTuple):
    """An indexing expression used by a node (via ops.index_expr).

    Mirrors MemoryDep's (index, var_names, size) shape but names no buffer:
    it records index arithmetic, not a memory access.
    """

    index: sympy.Expr  # type: ignore[assignment]
    var_names: Tuple[sympy.Symbol, ...]
    size: Tuple[sympy.Expr, ...]
|
| 122 |
+
|
| 123 |
+
|
| 124 |
+
@dataclasses.dataclass
class ReadWrites:
    """Summary of the memory behavior of one traced kernel/op body.

    Collected by extract_read_writes(); consumed by the scheduler to build
    dependency edges and estimate memory traffic.
    """

    # Element/buffer reads and writes (MemoryDep / StarDep / WeakDep).
    reads: Set[Dep]
    writes: Set[Dep]
    # Indexing expressions used without touching memory (ops.index_expr).
    index_exprs: Set[IndexExprDep]
    # Iteration variables of the traced body; None/empty when normalized.
    range_vars: Optional[List[sympy.Expr]] = None
    var_ranges: Optional[VarRanges] = None
    # Per-op usage counts gathered by _OpCounter; may be None.
    op_counts: Optional[collections.Counter] = None

    def rename(self, renames: typing.Dict[str, str]) -> "ReadWrites":
        """Return a copy with every read/write dep renamed per `renames`."""
        return ReadWrites(
            {dep.rename(renames) for dep in self.reads},
            {dep.rename(renames) for dep in self.writes},
            self.index_exprs,
            self.range_vars,
            self.var_ranges,
            op_counts=self.op_counts,
        )

    def with_read(self, dep: Dep) -> "ReadWrites":
        """Return a copy with one extra read; only ordering/whole-buffer deps allowed."""
        assert isinstance(dep, (WeakDep, StarDep))
        return ReadWrites(
            set.union(self.reads, {dep}),
            self.writes,
            self.index_exprs,
            self.range_vars,
            self.var_ranges,
            op_counts=self.op_counts,
        )

    def merge(self, other: "ReadWrites"):
        """Union two ReadWrites; reads satisfied by our own writes are dropped.

        Note: range_vars/var_ranges are intentionally not propagated — the
        merged deps no longer share a single iteration space.
        """
        reads = set.union(self.reads, other.reads)
        writes = set.union(self.writes, other.writes)
        index_exprs = set.union(self.index_exprs, other.index_exprs)
        if self.op_counts is not None:
            op_counts = collections.Counter(self.op_counts)
            op_counts.update(other.op_counts or {})
        else:
            op_counts = other.op_counts
        return ReadWrites(reads - writes, writes, index_exprs, op_counts=op_counts)

    @staticmethod
    def merge_list(read_writes: List["ReadWrites"]):
        """Merge many ReadWrites at once; internal reads (covered by any
        write in the list) are removed, op_counts are summed."""
        all_writes = set.union(*[rw.writes for rw in read_writes])
        all_reads = set.union(*[rw.reads for rw in read_writes]) - all_writes
        all_index_exprs = set.union(*[rw.index_exprs for rw in read_writes])

        op_counts = collections.Counter()
        for rw in read_writes:
            if rw.op_counts is not None:
                op_counts.update(rw.op_counts)

        return ReadWrites(all_reads, all_writes, all_index_exprs, op_counts=op_counts)

    def remove_reads(self, rem_reads):
        """Return a copy with the given read deps removed."""
        return ReadWrites(
            self.reads - rem_reads,
            self.writes,
            self.index_exprs,
            self.range_vars,
            self.var_ranges,
            op_counts=self.op_counts,
        )

    def reads_and_writes(self):
        """Iterate all deps, reads first then writes."""
        return itertools.chain(self.reads, self.writes)
|
| 190 |
+
|
| 191 |
+
|
| 192 |
+
class _RecordLoadStoreInner(V.MockHandler):
    """Ops handler that records memory deps while symbolically tracing a body.

    Each load/store/index_expr callback canonicalizes its index expression
    and accumulates a MemoryDep/IndexExprDep; the string return values are
    only MockHandler-style traces.
    """

    def __init__(self, var_ranges: VarRanges, normalize: bool):
        super().__init__()
        self._reads: Set[MemoryDep] = set()
        self._writes: Set[MemoryDep] = set()
        self._index_exprs: Set[IndexExprDep] = set()
        # Iteration variable -> extent for the body being traced.
        self._var_ranges: VarRanges = var_ranges
        # When True, indexes are rewritten onto fresh canonical variables so
        # deps from differently-numbered loops can compare equal.
        self._normalize: bool = normalize

    def canonicalize(
        self, index: sympy.Expr
    ) -> Tuple[sympy.Expr, Tuple[sympy.Symbol, ...], Tuple[sympy.Expr, ...]]:
        """Return (index, var_names, sizes) for a dep.

        Without normalization this only drops size-1 dimensions; with it, the
        loops are re-simplified and the index is rewritten onto fresh
        canonically-prefixed variables.
        """
        if not self._normalize:
            sizes = [V.graph.sizevars.simplify(x) for x in self._var_ranges.values()]
            # Drop size-1 dims from both the variable list and the sizes.
            var_names = tuple(
                k for k, v in zip(self._var_ranges.keys(), sizes) if v != 1
            )
            sizes = tuple(v for v in sizes if v != 1)
            return index, var_names, sizes

        # Try to further simplify the indexes even if simplify_loops didn't
        # convert it to the simplest form because of the interference from
        # different indexing formulas.
        free_symbols = index.free_symbols
        var_ranges = {
            k: V.graph.sizevars.simplify(v)
            for k, v in self._var_ranges.items()
            # TODO(jansel): explore this further normalization
            # if k in free_symbols
        }
        index_vars = [*var_ranges.keys()]
        sizes = [*var_ranges.values()]
        new_sizes, reindex, prune = V.graph.sizevars._simplify_loops(
            index_vars,
            sizes,
            index_prevent_reordering([index], index_vars, sizes),
        )

        # assign new variables each dimension to deal with numbering mismatches
        # d0, d1, d2 could become d0, d2 -- which won't match d0, d1
        new_vars, add_var = var_builder(canonicalization_prefix())
        replacement = dict(zip(index_vars, reindex([add_var(x) for x in new_sizes])))
        index = sympy_subs(sympy.expand(index), replacement)

        new_vars = [*new_vars.keys()]
        new_sizes = [*new_sizes]
        free_symbols = index.free_symbols
        while new_vars and new_vars[-1] not in free_symbols:
            # Reduction has last (reduced) dim in its sizes, but
            # downstream users won't. Normalize this away.
            new_vars.pop()
            new_sizes.pop()
        return index, tuple(new_vars), tuple(new_sizes)

    def load(self, name: str, index: sympy.Expr) -> str:
        """Record a read of buffer `name` at `index`."""
        self._reads.add(MemoryDep(name, *self.canonicalize(index)))
        return f"load({name}, {sympy_str(index)})"

    def load_seed(self, name: str, index: int):
        """Record a seed read; seeds are always addressed by a literal int."""
        assert isinstance(index, int)
        return self.load(name, sympy.Integer(index))

    def store(self, name: str, index: sympy.Expr, value: str, mode=None) -> str:
        """Record a write of `value` to buffer `name` at `index`."""
        self._writes.add(MemoryDep(name, *self.canonicalize(index)))
        return f"store({name}, {sympy_str(index)}, {value}, {mode})"

    def store_reduction(self, name: str, index, value) -> str:
        # A reduction store is recorded as an ordinary store of the wrapped value.
        return self.store(name, index, f"store_reduction({value})")

    def index_expr(self, index: sympy.Expr, dtype) -> str:
        """Record index arithmetic that does not itself touch memory."""
        self._index_exprs.add(IndexExprDep(*self.canonicalize(index)))
        return f"index_expr({sympy_str(index)}, {dtype})"

    def bucketize(
        self,
        values,
        offsets_name: str,
        offsets_size: sympy.Expr,
        indexing_dtype: torch.dtype,
        right: bool,
    ):
        # The binary search reads an unknown subset of offsets, so record a
        # whole-buffer dependency rather than element-level MemoryDeps.
        self._reads.add(StarDep(offsets_name))
        return f"bucketize({values}, {offsets_name}, {sympy_str(offsets_size)}, {indexing_dtype}, {right})"
|
| 275 |
+
|
| 276 |
+
|
| 277 |
+
class _OpCounter:
|
| 278 |
+
"""Shim to count how many times each op is used"""
|
| 279 |
+
|
| 280 |
+
def __init__(self, inner):
|
| 281 |
+
super().__init__()
|
| 282 |
+
self.parent_handler = inner
|
| 283 |
+
self._op_counts = collections.Counter()
|
| 284 |
+
|
| 285 |
+
def __getattr__(self, name):
|
| 286 |
+
self._op_counts[name] += 1
|
| 287 |
+
return getattr(self.parent_handler, name)
|
| 288 |
+
|
| 289 |
+
|
| 290 |
+
class RecordLoadStore(V.KernelFormatterHandler):
    """Formatter handler whose inner chain records deps and op counts.

    Handler stack: RecordLoadStore -> _OpCounter -> _RecordLoadStoreInner,
    so every op is counted and every load/store becomes a dep.
    """

    def __init__(self, var_ranges: VarRanges, normalize: bool):
        parent_handler = _RecordLoadStoreInner(
            var_ranges=var_ranges, normalize=normalize
        )
        parent_handler = _OpCounter(parent_handler)
        super().__init__(parent_handler=parent_handler)
|
| 297 |
+
|
| 298 |
+
|
| 299 |
+
def var_builder(prefix: str) -> Tuple[VarRanges, Callable[[sympy.Expr], sympy.Symbol]]:
    """Return (var_ranges, add_var).

    ``add_var(length)`` mints a fresh symbol named ``{prefix}{n}`` with an
    increasing counter and records ``symbol -> length`` in ``var_ranges``.
    """
    counter = itertools.count()
    var_ranges: VarRanges = dict()

    def add_var(length: sympy.Expr) -> sympy.Symbol:
        sym = sympy_symbol(f"{prefix}{next(counter)}")
        var_ranges[sym] = length
        return sym

    return var_ranges, add_var
|
| 309 |
+
|
| 310 |
+
|
| 311 |
+
def index_vars_no_squeeze(*argsizes: Tuple[sympy.Expr, ...], prefix: str):
    """Create one index variable per dimension of each size tuple.

    Unlike index_vars_squeeze, size-1 dimensions are kept as-is.
    Returns (args, var_ranges) where args[i] are the symbols for argsizes[i].
    """
    var_ranges, add_var = var_builder(prefix)
    args: List[List[sympy.Symbol]] = [
        [add_var(s) for s in size] for size in argsizes
    ]
    return args, var_ranges
|
| 317 |
+
|
| 318 |
+
|
| 319 |
+
def index_vars_squeeze(*argsizes: Tuple[sympy.Expr, ...], prefix: str = "d"):
    """Create index variables for each size tuple with size-1 dims squeezed.

    Variables are only minted for the squeezed shape; SqueezeView's reindex
    maps them back into the original rank so args[i] matches argsizes[i].
    """
    from .ir import SqueezeView

    var_ranges, add_var = var_builder(prefix)
    args: List[List[sympy.Expr]] = []
    new_sizes: List[List[sympy.Expr]] = []
    for size in argsizes:
        squeezed_size, unsqueeze = SqueezeView.squeezer(size)
        new_sizes.append(squeezed_size)
        args.append(unsqueeze([add_var(s) for s in squeezed_size]))
    return args, var_ranges
|
| 330 |
+
|
| 331 |
+
|
| 332 |
+
def extract_read_writes(
    fn: Callable,
    *argsizes: Tuple[sympy.Expr, ...],
    normalize: bool = False,
    prefix: str = "d",
):
    """Symbolically run `fn` over fresh index variables and collect its
    ReadWrites (memory deps, index exprs, op counts).

    `fn` is called with one list of index symbols per entry of `argsizes`
    while RecordLoadStore is installed as the ops handler.
    """
    args, var_ranges = index_vars_squeeze(*argsizes, prefix=prefix)
    rw = RecordLoadStore(var_ranges, normalize=normalize)
    with V.set_ops_handler(rw):  # type: ignore[call-arg]
        fn(*args)

    if normalize:
        range_vars = []  # Number of vars could differ due to normalization
    else:
        range_vars = [*itertools.chain(*args)]

    # Unwrap the handler stack: RecordLoadStore -> _OpCounter -> inner recorder.
    inner = rw.parent_handler.parent_handler
    return ReadWrites(
        set(inner._reads),
        set(inner._writes),
        inner._index_exprs,
        range_vars,
        var_ranges,
        rw.parent_handler._op_counts,
    )
|
| 357 |
+
|
| 358 |
+
|
| 359 |
+
def canonicalization_prefix():
    """Name prefix for the fresh variables minted during index canonicalization."""
    return "c"
|
llava_next/lib/python3.10/site-packages/torch/_inductor/exc.py
ADDED
|
@@ -0,0 +1,87 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import tempfile
|
| 3 |
+
import textwrap
|
| 4 |
+
from functools import lru_cache
|
| 5 |
+
|
| 6 |
+
# Opt-in debug aid: when TORCHINDUCTOR_WRITE_MISSING_OPS=1, append each missing
# op to <tmpdir>/missing_ops.txt (deduplicated via lru_cache); otherwise the
# recorder is a no-op.
if os.environ.get("TORCHINDUCTOR_WRITE_MISSING_OPS") == "1":

    @lru_cache(None)
    def _record_missing_op(target):
        # lru_cache ensures each distinct target is written only once per run.
        with open(f"{tempfile.gettempdir()}/missing_ops.txt", "a") as fd:
            fd.write(str(target) + "\n")

else:

    def _record_missing_op(target):
        pass
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
class OperatorIssue(RuntimeError):
    """Base error for problems involving a specific operator call."""

    @staticmethod
    def operator_str(target, args, kwargs):
        """Format target/args/kwargs as an indented multi-line summary."""
        lines = [f"target: {target}"]
        for i, arg in enumerate(args):
            lines.append(f"args[{i}]: {arg}")
        if kwargs:
            lines.append(f"kwargs: {kwargs}")
        return textwrap.indent("\n".join(lines), "  ")
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
class MissingOperatorWithoutDecomp(OperatorIssue):
    """Raised when an op has neither an inductor lowering nor a decomposition."""

    def __init__(self, target, args, kwargs):
        # Optionally log the op to the missing-ops file (env-gated, see above).
        _record_missing_op(target)
        super().__init__(f"missing lowering\n{self.operator_str(target, args, kwargs)}")
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
class MissingOperatorWithDecomp(OperatorIssue):
    """Raised when an op lacks a lowering but a decomposition exists.

    The message tells the user to register the available decomposition in
    torch._inductor.decompositions.
    """

    def __init__(self, target, args, kwargs):
        # Optionally log the op to the missing-ops file (env-gated, see above).
        _record_missing_op(target)
        super().__init__(
            f"missing decomposition\n{self.operator_str(target, args, kwargs)}"
            + textwrap.dedent(
                f"""

                There is a decomposition available for {target} in
                torch._decomp.get_decompositions(). Please add this operator to the
                `decompositions` list in torch._inductor.decompositions
                """
            )
        )
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
class LoweringException(OperatorIssue):
    """Wraps an arbitrary exception raised while lowering one operator,
    attaching the operator call summary to the original error text."""

    def __init__(self, exc, target, args, kwargs):
        super().__init__(
            f"{type(exc).__name__}: {exc}\n{self.operator_str(target, args, kwargs)}"
        )
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
class InvalidCxxCompiler(RuntimeError):
    """Raised when the configured C++ compiler (config.cpp.cxx) is unusable."""

    def __init__(self):
        # Imported lazily to avoid a circular import at module load time.
        from . import config

        super().__init__(
            f"No working C++ compiler found in {config.__name__}.cpp.cxx: {config.cpp.cxx}"
        )
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
class CppCompileError(RuntimeError):
    """Raised when invoking the C++ compiler fails.

    The message embeds the full command line and the compiler's output.
    """

    def __init__(self, cmd, output):
        if isinstance(output, bytes):
            output = output.decode("utf-8")

        message = "\n".join(
            [
                "C++ compile error",
                "",
                "Command:",
                " ".join(cmd),
                "",
                "Output:",
                output,
            ]
        )
        super().__init__(message)
|
llava_next/lib/python3.10/site-packages/torch/_inductor/fx_utils.py
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
# Check the pattern: (nn.module, F.function/torch.Tensor.method) matched.
|
| 5 |
+
# Works for length 2 patterns with 1 module and 1 function/method.
|
| 6 |
+
# Check the pattern: (nn.module, F.function/torch.Tensor.method) matched.
# Works for length 2 patterns with 1 module and 1 function/method.
def matches_module_function_pattern(pattern, node, modules):
    """Return True when `node` and its first input match `pattern`.

    `pattern` is (module_class, function_or_method_target); `node` must be a
    call_function/call_method whose sole consumer relationship with the
    preceding call_module node matches the pattern.
    """
    if len(node.args) == 0:
        return False
    first = node.args[0]
    if not isinstance(first, torch.fx.Node) or not isinstance(node, torch.fx.Node):
        return False
    # the first node is call_module
    if first.op != "call_module":
        return False
    if not isinstance(first.target, str) or first.target not in modules:
        return False
    if type(modules[first.target]) is not pattern[0]:
        return False
    # the second node is call_function or call_method
    if node.op not in ("call_function", "call_method"):
        return False
    if node.target != pattern[1]:
        return False
    # make sure node.args[0] output is only used by current node.
    return len(first.users) <= 1
|
llava_next/lib/python3.10/site-packages/torch/_inductor/graph.py
ADDED
|
@@ -0,0 +1,988 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import hashlib
|
| 2 |
+
import logging
|
| 3 |
+
import operator
|
| 4 |
+
import os
|
| 5 |
+
import re
|
| 6 |
+
import sys
|
| 7 |
+
import time
|
| 8 |
+
from collections import defaultdict
|
| 9 |
+
from contextlib import contextmanager
|
| 10 |
+
from typing import DefaultDict, Dict, List, Optional, Set, Tuple
|
| 11 |
+
|
| 12 |
+
import sympy
|
| 13 |
+
|
| 14 |
+
import torch
|
| 15 |
+
import torch._logging
|
| 16 |
+
import torch.fx
|
| 17 |
+
from torch._decomp import get_decompositions
|
| 18 |
+
from torch._dynamo.utils import dynamo_timed
|
| 19 |
+
from torch.fx.experimental.symbolic_shapes import (
|
| 20 |
+
free_symbols,
|
| 21 |
+
magic_methods,
|
| 22 |
+
method_to_operator,
|
| 23 |
+
ShapeEnv,
|
| 24 |
+
SymTypes,
|
| 25 |
+
)
|
| 26 |
+
from torch.utils._mode_utils import no_dispatch
|
| 27 |
+
|
| 28 |
+
from . import config, ir, metrics
|
| 29 |
+
from .codegen.common import (
|
| 30 |
+
get_scheduling_for_device,
|
| 31 |
+
get_wrapper_codegen_for_device,
|
| 32 |
+
register_backend_for_device,
|
| 33 |
+
)
|
| 34 |
+
from .codegen.wrapper import CppWrapperCodeGen, CudaWrapperCodeGen, WrapperCodeGen
|
| 35 |
+
from .exc import (
|
| 36 |
+
LoweringException,
|
| 37 |
+
MissingOperatorWithDecomp,
|
| 38 |
+
MissingOperatorWithoutDecomp,
|
| 39 |
+
)
|
| 40 |
+
from .ir import Constant, FixedLayout, InputBuffer, Pointwise, Reduction, TensorBox
|
| 41 |
+
from .lowering import (
|
| 42 |
+
FALLBACK_ALLOW_LIST,
|
| 43 |
+
fallback_handler,
|
| 44 |
+
fallback_node_due_to_unsupported_type,
|
| 45 |
+
layout_constraints,
|
| 46 |
+
lowerings,
|
| 47 |
+
make_fallback,
|
| 48 |
+
needs_realized_inputs,
|
| 49 |
+
unsupported_output_tensor,
|
| 50 |
+
)
|
| 51 |
+
from .sizevars import SizeVarAllocator
|
| 52 |
+
from .utils import convert_shape_to_inductor, gather_origins, get_sympy_Expr_dtype
|
| 53 |
+
from .virtualized import V
|
| 54 |
+
|
| 55 |
+
# Module-level loggers: `log` for general debug output, plus torch._logging
# artifact loggers gated by the "perf_hints" / "output_code" artifact names.
log = logging.getLogger(__name__)
perf_hint_log = torch._logging.getArtifactLogger(__name__, "perf_hints")
output_code_log = torch._logging.getArtifactLogger(__name__, "output_code")
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
def supported_dtype_of_cpp_wrapper(dtype, cuda):
    """Return True if the cpp wrapper codegen supports `dtype`.

    float16 is only supported on CUDA; all other supported dtypes are
    device-independent.
    """
    ok = {
        torch.float32,
        torch.float64,
        torch.int64,
        torch.int32,
        torch.int16,
        torch.int8,
        torch.uint8,
        torch.bool,
        torch.bfloat16,
        torch.complex64,
        # torch.float16, # TODO: implement this
    }
    if cuda:
        ok.add(torch.float16)
    return dtype in ok
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
def may_get_constant_buffer_dtype(constant_buffer):
|
| 81 |
+
assert isinstance(
|
| 82 |
+
constant_buffer, (sympy.Symbol, sympy.Expr, sympy.core.numbers.Integer)
|
| 83 |
+
), "get_constant_buffer_dtype only supports input of sympy.Symbol, sympy.Expr or sympy.core.numbers.Integer"
|
| 84 |
+
if isinstance(constant_buffer, sympy.core.numbers.Integer):
|
| 85 |
+
return torch.int64
|
| 86 |
+
|
| 87 |
+
if isinstance(constant_buffer, sympy.Expr):
|
| 88 |
+
return get_sympy_Expr_dtype(constant_buffer)
|
| 89 |
+
|
| 90 |
+
if constant_buffer.is_integer:
|
| 91 |
+
return torch.int64
|
| 92 |
+
elif constant_buffer.is_float:
|
| 93 |
+
return torch.float32
|
| 94 |
+
else:
|
| 95 |
+
return None
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
def is_magic_method(op):
    """True if `op` is the operator form of a symbolic-shape magic method."""
    return op in {method_to_operator(name) for name in magic_methods}
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
class GraphLowering(torch.fx.Interpreter):
|
| 104 |
+
    def symbolic_sizes_strides(self, ex: torch.Tensor):
        """
        Support dynamic shapes and dynamic strides by assigning variables
        to each dimension.  We duck-shape tensors, so if two tensors
        have the same size they get assigned the same symbolic variable.

        Returns (size, stride) as lists of sympy expressions (plain ints are
        passed through when a dimension is not symbolic).
        """
        if self.reuse_shape_env:
            # The caller's ShapeEnv already assigned symbols; just convert.
            return convert_shape_to_inductor(ex.size()), convert_shape_to_inductor(
                ex.stride()
            )
        else:
            from torch._dynamo.source import ConstantSource

            # TODO: this should not be needed once #93059 lands
            # https://github.com/pytorch/pytorch/pull/94031#discussion_r1096044816
            # TODO: make a dedicated UnknownSource for this?
            # NB: This is using the legacy default behavior from
            # create_symbolic_sizes_strides_storage_offset but we hope we can
            # just delete this entirely
            source = ConstantSource(
                f"__inductor_unknown_tensor_{len(self._shape_env.var_to_val)}"
            )
            (
                size,
                stride,
                _,
            ) = self._shape_env.create_symbolic_sizes_strides_storage_offset(
                ex,
                source,
            )

            # Unwrap SymInt wrappers down to their raw sympy expressions.
            size = [i.node.expr if isinstance(i, torch.SymInt) else i for i in size]
            stride = [i.node.expr if isinstance(i, torch.SymInt) else i for i in stride]
            return size, stride
|
| 138 |
+
|
| 139 |
+
def static_sizes_strides(self, ex: torch.Tensor):
|
| 140 |
+
"""
|
| 141 |
+
Primarily used to weights
|
| 142 |
+
"""
|
| 143 |
+
size = [sympy.Integer(i) for i in ex.size()]
|
| 144 |
+
stride = [sympy.Integer(i) for i in ex.stride()]
|
| 145 |
+
return size, stride
|
| 146 |
+
|
| 147 |
+
    def init_backend_registration(self):
        """Register default cpu/cuda codegen backends if none are registered.

        Imports are done lazily so loading this module does not pull in the
        cpp/triton codegen stacks unless they are actually needed.
        """
        if get_scheduling_for_device("cpu") is None:
            from .codegen.cpp import CppScheduling

            register_backend_for_device("cpu", CppScheduling, WrapperCodeGen)

        if get_scheduling_for_device("cuda") is None:
            from .codegen.triton import TritonScheduling

            register_backend_for_device("cuda", TritonScheduling, WrapperCodeGen)
|
| 157 |
+
|
| 158 |
+
    def __init__(
        self,
        gm: torch.fx.GraphModule,
        shape_env=None,
        num_static_inputs=None,
        graph_id=None,
        cpp_wrapper=False,
        aot_mode=False,
        user_visible_outputs=frozenset(),
        layout_opt=None,
    ):
        """Set up lowering state for one FX graph.

        shape_env: reuse the caller's ShapeEnv when provided; otherwise a
        fresh one is created (reuse_shape_env records which case applies).
        layout_opt: force layout optimization on/off; None defers to the
        decide_layout_opt heuristic.
        """
        super().__init__(gm)

        self.layout_opt = (
            layout_opt if layout_opt is not None else self.decide_layout_opt(gm)
        )
        self.num_channels_last_conv = 0

        self.extra_traceback = False  # we do our own error wrapping
        if shape_env is None:
            shape_env = ShapeEnv()
            self.reuse_shape_env = False
        else:
            # NOTE(review): this assignment is redundant — self._shape_env is
            # set unconditionally right after the if/else.
            self._shape_env = shape_env
            self.reuse_shape_env = True
        self._shape_env = shape_env
        self.sizevars = SizeVarAllocator(shape_env)
        # Graph inputs/outputs as inductor IR.
        self.graph_inputs: Dict[str, TensorBox] = {}
        self.graph_inputs_original: Dict[str, InputBuffer] = {}
        self.graph_outputs: Optional[List[ir.IRNode]] = None
        # Devices seen while lowering; drives wrapper codegen decisions.
        self.device_types: Set[str] = set()
        self.device_idxs: Set[int] = set()
        self.cuda = False
        self.buffers: List[ir.ComputedBuffer] = []
        self.constants: Dict[str, torch.Tensor] = {}
        self.constant_reprs: Dict[str, str] = {}
        # Buffer-removal / mutation bookkeeping used by the scheduler.
        self.removed_buffers: Set[str] = set()
        self.removed_inplace_buffers: Set[str] = set()
        self.mutated_buffers: Set[str] = set()
        self.inplaced_to_remove: Set[str] = set()
        self.wrapper_code: Optional[WrapperCodeGen] = None
        self.current_node: Optional[torch.fx.Node] = None
        self.num_static_inputs = num_static_inputs
        self.lists: Dict[str, List[str]] = {}
        self.mutated_inputs: Set[str] = set()
        self.mutated_input_idxs: List[int] = []
        self.unaligned_buffers: Set[str] = set()
        self.name_to_buffer: Dict[str, ir.ComputedBuffer] = {}
        self.name_to_users: DefaultDict[str, List[ir.IRNode]] = defaultdict(list)
        self.creation_time = time.time()
        self.name = "GraphLowering"
        self.cpp_wrapper = cpp_wrapper
        self.aot_mode = aot_mode
        self.graph_id = graph_id
        self.scheduler = None
        self.nodes_prefer_channels_last = (
            self.find_nodes_prefer_channels_last() if self.layout_opt else set()
        )
        self._warned_fallback = {"aten.convolution_backward"}
        self.user_visible_outputs = user_visible_outputs
        self.cache_key: str = ""  # This is the cache key for the compiled artifact
        self.cache_path: str = ""  # This is the path in the filesystem where the compiled artifact is stored
        self.cache_linemap: List[
            Tuple[int, str]
        ] = (
            []
        )  # This is the linemap used by the profiler to mark custom compiled kernels getting run
        # Used if lowering encounters cases where cudagraphs are not supported
        self.disable_cudagraphs = False
        self.init_backend_registration()
|
| 228 |
+
|
| 229 |
+
@staticmethod
|
| 230 |
+
def decide_layout_opt(gm) -> bool:
|
| 231 |
+
"""
|
| 232 |
+
Decide if we should enable layout optimization for this graph based on
|
| 233 |
+
heuristics.
|
| 234 |
+
"""
|
| 235 |
+
if not config.layout_optimization:
|
| 236 |
+
return False
|
| 237 |
+
|
| 238 |
+
conv_nodes = [
|
| 239 |
+
n for n in gm.graph.nodes if n.target == torch.ops.aten.convolution.default
|
| 240 |
+
]
|
| 241 |
+
nconv = len(conv_nodes)
|
| 242 |
+
|
| 243 |
+
if nconv == 0:
|
| 244 |
+
return False
|
| 245 |
+
|
| 246 |
+
# Currently on ROCm we are seeing some slow downs in gcnArch that do not
|
| 247 |
+
# have optimal NHWC implementations. On ROCm MI200 series we will
|
| 248 |
+
# default to the enforced last channels behavior, but on non-MI200 series
|
| 249 |
+
# we will disable the forced layout.
|
| 250 |
+
if torch.version.hip and torch.cuda.is_available():
|
| 251 |
+
gpu_name = torch.cuda.get_device_name(0)
|
| 252 |
+
if not re.search(r"MI2\d\d", gpu_name):
|
| 253 |
+
return False
|
| 254 |
+
|
| 255 |
+
# For cpu backend and mkldnn enabled, we always using channels_last for a better performance.
|
| 256 |
+
if (
|
| 257 |
+
all(
|
| 258 |
+
n.args[idx].meta["val"].device == torch.device("cpu")
|
| 259 |
+
for n in conv_nodes
|
| 260 |
+
for idx in [0, 1]
|
| 261 |
+
)
|
| 262 |
+
and torch.backends.mkldnn.enabled
|
| 263 |
+
and torch.backends.mkldnn.is_available()
|
| 264 |
+
):
|
| 265 |
+
return True
|
| 266 |
+
|
| 267 |
+
# Followering models are skipped due to this:
|
| 268 |
+
# jx_nest_base
|
| 269 |
+
# volo_d1_224
|
| 270 |
+
if len(list(gm.graph.nodes)) >= 300 * nconv:
|
| 271 |
+
log.debug("Only a few conv, skip layout optimization")
|
| 272 |
+
return False
|
| 273 |
+
|
| 274 |
+
if any(
|
| 275 |
+
free_symbols(n.args[idx].meta["val"]) for n in conv_nodes for idx in [0, 1]
|
| 276 |
+
):
|
| 277 |
+
log.debug(
|
| 278 |
+
"See perf regression with dynamic shape. Follow up in https://github.com/pytorch/pytorch/issues/102670"
|
| 279 |
+
)
|
| 280 |
+
return False
|
| 281 |
+
|
| 282 |
+
# Channels last layout can dramatically hurt grouped conv perf. E.g.
|
| 283 |
+
# Conv with arguments like
|
| 284 |
+
# {"input_shape": [32, 224, 112, 112], "weight_shape": [224, 112, 3, 3],
|
| 285 |
+
# "stride": [2, 2], "padding": [1, 1], "groups": 2}
|
| 286 |
+
# slows down 31x using channels last..
|
| 287 |
+
|
| 288 |
+
# But a lot of timm models use depthwise separable convolution which will
|
| 289 |
+
# result in grouped convolution with in-channel size == 1.
|
| 290 |
+
# For those grouped convolution, channels last still helps a lot.
|
| 291 |
+
# E.g.
|
| 292 |
+
# Conv with arguments
|
| 293 |
+
# {"input_shape": [128, 58, 56, 56], "weight_shape": [58, 1, 3, 3],
|
| 294 |
+
# "stride": [2, 2], "padding": [1, 1], "groups": 58}
|
| 295 |
+
# get 1.86x speedup with channels last layout.
|
| 296 |
+
#
|
| 297 |
+
# The following heuristics skip using channels-last if the model contains
|
| 298 |
+
# grouped convolution with in-channels > 1.
|
| 299 |
+
if any(
|
| 300 |
+
n.args[-1] > 1 and n.args[1].meta["val"].size(1) > 1 for n in conv_nodes
|
| 301 |
+
):
|
| 302 |
+
log.debug("Found grouped convolution with >1 in_channels!")
|
| 303 |
+
return False
|
| 304 |
+
|
| 305 |
+
# For some models that contain convolution with larger in-channel than out-channel, applying
|
| 306 |
+
# channels last hurts performance.
|
| 307 |
+
# Following models are skipped due to this:
|
| 308 |
+
# - pytorch_unet
|
| 309 |
+
# - phlippe_densenet (slightly worse)
|
| 310 |
+
# - Background_Matting (1.22x -> 0.821x)
|
| 311 |
+
# - pytorch_CycleGAN_and_pix2pix (1.597x -> 1.294x)
|
| 312 |
+
if any(
|
| 313 |
+
n.args[1].meta["val"].size(0) * 2 <= n.args[1].meta["val"].size(1)
|
| 314 |
+
and n.args[1].meta["val"].size(2) > 1
|
| 315 |
+
for n in conv_nodes
|
| 316 |
+
):
|
| 317 |
+
log.debug(
|
| 318 |
+
"Skip layout optimization because some convolutions have smaller out_channel"
|
| 319 |
+
)
|
| 320 |
+
return False
|
| 321 |
+
|
| 322 |
+
# Following models are skipped due to this:
|
| 323 |
+
# - functorch_maml_omniglot
|
| 324 |
+
if all(
|
| 325 |
+
n.args[1].meta["val"].size(0) <= 64 and n.args[1].meta["val"].size(1) <= 64
|
| 326 |
+
for n in conv_nodes
|
| 327 |
+
):
|
| 328 |
+
log.debug("Skip layout opt because all convolution channels are too small")
|
| 329 |
+
return False
|
| 330 |
+
|
| 331 |
+
# aten._scaled_dot_product_flash_attention requires the last stride of query/key/value
|
| 332 |
+
# to be 1. Check https://gist.github.com/shunting314/fa6eeab2aad8d1265c4d5e50b560d94f
|
| 333 |
+
# for more details.
|
| 334 |
+
#
|
| 335 |
+
# When a model contains aten._scaled_dot_product_flash_attention and we enable layout optimization,
|
| 336 |
+
# the op may get channels last input and fail. Example include: twins_pcpvt_base, xcit_large_24_p8_224
|
| 337 |
+
# for _scaled_dot_product_flash_attention and xcit_large_24_p8_224 for _scaled_dot_product_efficient_attention.
|
| 338 |
+
#
|
| 339 |
+
# We disable layout optimization if a model contains aten._scaled_dot_product_flash_attention.
|
| 340 |
+
#
|
| 341 |
+
# An alternative is to do necessary layout convertion to make sure aten._scaled_dot_product_flash_attention's
|
| 342 |
+
# inputs have the layout needed. But that seems to have worse perf than disabing the layout opt.
|
| 343 |
+
# TODO(shunting) revisit if we can still apply layout optimization to models containing sdpa while
|
| 344 |
+
# bringing perf gains.
|
| 345 |
+
for n in gm.graph.nodes:
|
| 346 |
+
if n.target in (
|
| 347 |
+
torch.ops.aten._scaled_dot_product_flash_attention.default,
|
| 348 |
+
torch.ops.aten._scaled_dot_product_efficient_attention.default,
|
| 349 |
+
):
|
| 350 |
+
log.debug(
|
| 351 |
+
"Skip layout optimization because sdpa (scaled dot product attention) is found"
|
| 352 |
+
)
|
| 353 |
+
return False
|
| 354 |
+
|
| 355 |
+
return True
|
| 356 |
+
|
| 357 |
+
def find_nodes_prefer_channels_last(self):
|
| 358 |
+
"""
|
| 359 |
+
The rule to decide if an node prefer channels last is simple.
|
| 360 |
+
1. if it's input/output of a convolution
|
| 361 |
+
2. if one of its user prefers channels last
|
| 362 |
+
|
| 363 |
+
We have rule 1 because cudnn runs a faster convolution kernel for channels last inputs;
|
| 364 |
+
Rule 2 is also important. It makes sure that indirect inputs to convolution also prefers
|
| 365 |
+
channels last.
|
| 366 |
+
|
| 367 |
+
Consider the scenario: conv -> batch-norm -> relu -> conv
|
| 368 |
+
Without rule 2, batch-norm output may use a contiguous layout. That will cause 2 extra copies:
|
| 369 |
+
1. the output of batch-norm should be channels last initially since its input is a conv's output.
|
| 370 |
+
Forcing the batch-norm's output to be contiguous results in the first copy
|
| 371 |
+
2. The second conv's input is initially contiguous. This layout is propagated from the batch-norm's output.
|
| 372 |
+
We need convert it to channels last layout which results in the second copy.
|
| 373 |
+
With rule 2, we makes sure all the tensors in the chain uses channels last layout. So both copies
|
| 374 |
+
can be saved.
|
| 375 |
+
"""
|
| 376 |
+
output_set = set()
|
| 377 |
+
for n in reversed(self.module.graph.nodes):
|
| 378 |
+
if n.target == torch.ops.aten.convolution.default:
|
| 379 |
+
output_set.add(n)
|
| 380 |
+
continue
|
| 381 |
+
|
| 382 |
+
for user in n.users:
|
| 383 |
+
if user in output_set:
|
| 384 |
+
output_set.add(n)
|
| 385 |
+
break
|
| 386 |
+
|
| 387 |
+
# need a second pass to add downstream nodes of those channel last nodes to the sets.
|
| 388 |
+
# This pass is especially needed to avoid mix-layout kernel inputs in backward pass.
|
| 389 |
+
#
|
| 390 |
+
# Let's say a conv-batchnorm 's output is passed to relu whose output is in turn returned
|
| 391 |
+
# from the fwd graph. Without this second pass, we will force relu's output to be contiguous.
|
| 392 |
+
# Then in the kernel in backward pass, the contiguous output of relu may be mix with other channels last
|
| 393 |
+
# tensors and passed to a kernel.
|
| 394 |
+
#
|
| 395 |
+
# This pass improve yolov3 training speedup from 1.116x (worse than disabling layout optimization speedup 1.196x) to 1.457x.
|
| 396 |
+
# It also improves dla102 training speedup from 1.240x (worse than disabling layout optimization speedup 1.523x) to 1.835x .
|
| 397 |
+
# This also helps the following models:
|
| 398 |
+
# - res2net101_26w_4s
|
| 399 |
+
# - res2net50_14w_8s
|
| 400 |
+
# - sebotnet33ts_256
|
| 401 |
+
for n in self.module.graph.nodes:
|
| 402 |
+
if n in output_set:
|
| 403 |
+
for child in n.users:
|
| 404 |
+
output_set.add(child)
|
| 405 |
+
|
| 406 |
+
return output_set
|
| 407 |
+
|
| 408 |
+
def warn_fallback(self, name):
|
| 409 |
+
if name not in self._warned_fallback:
|
| 410 |
+
self._warned_fallback.add(name)
|
| 411 |
+
perf_hint_log.info("Using FallbackKernel: %s", name)
|
| 412 |
+
|
| 413 |
+
def add_device_idx(self, idx: Optional[int]):
|
| 414 |
+
if idx is not None:
|
| 415 |
+
self.device_idxs.add(idx)
|
| 416 |
+
|
| 417 |
+
@property
|
| 418 |
+
def fake_mode(self):
|
| 419 |
+
return V.fake_mode
|
| 420 |
+
|
| 421 |
+
def get_buffer(self, buffer_name: str):
|
| 422 |
+
if buffer_name in self.name_to_buffer:
|
| 423 |
+
return self.name_to_buffer[buffer_name]
|
| 424 |
+
if buffer_name in self.graph_inputs:
|
| 425 |
+
return self.graph_inputs[buffer_name]
|
| 426 |
+
return None
|
| 427 |
+
|
| 428 |
+
def get_dtype(self, buffer_name: str):
|
| 429 |
+
if buffer_name in self.constants:
|
| 430 |
+
return self.constants[buffer_name].dtype
|
| 431 |
+
if buffer_name in self.name_to_buffer:
|
| 432 |
+
return self.name_to_buffer[buffer_name].get_dtype()
|
| 433 |
+
if buffer_name in self.graph_inputs:
|
| 434 |
+
return self.graph_inputs[buffer_name].get_dtype()
|
| 435 |
+
m = re.match(r"(as_strided|reinterpret_tensor)\(([a-zA-Z0-9_]+),", buffer_name)
|
| 436 |
+
if m:
|
| 437 |
+
return self.get_dtype(m.group(1))
|
| 438 |
+
raise KeyError(f"could not find {buffer_name}")
|
| 439 |
+
|
| 440 |
+
def get_numel(self, buffer_name: str):
|
| 441 |
+
from .ir import MultiOutputLayout
|
| 442 |
+
|
| 443 |
+
if buffer_name in self.constants:
|
| 444 |
+
return self.constants[buffer_name].numel()
|
| 445 |
+
if buffer_name in self.name_to_buffer:
|
| 446 |
+
buf = self.name_to_buffer[buffer_name]
|
| 447 |
+
if isinstance(getattr(buf, "layout", None), MultiOutputLayout):
|
| 448 |
+
return 1
|
| 449 |
+
return buf.get_numel()
|
| 450 |
+
if buffer_name in self.graph_inputs:
|
| 451 |
+
return self.graph_inputs[buffer_name].get_numel()
|
| 452 |
+
raise KeyError(f"could not find {buffer_name}")
|
| 453 |
+
|
| 454 |
+
@dynamo_timed
|
| 455 |
+
def run(self, *args):
|
| 456 |
+
return super().run(*args)
|
| 457 |
+
|
| 458 |
+
def disable_cpp_wrapper(self, cond):
|
| 459 |
+
metrics.disable_cpp_wrapper += 1
|
| 460 |
+
self.cpp_wrapper = False
|
| 461 |
+
log.debug("Set cpp_wrapper to False due to %s", cond)
|
| 462 |
+
|
| 463 |
+
def register_buffer(self, buffer: ir.ComputedBuffer):
|
| 464 |
+
name = f"buf{len(self.buffers)}"
|
| 465 |
+
self.buffers.append(buffer)
|
| 466 |
+
self.name_to_buffer[name] = buffer
|
| 467 |
+
return name
|
| 468 |
+
|
| 469 |
+
def register_list(self, buffer_names: List[str]):
|
| 470 |
+
name = "list_" + "_".join(buffer_names)
|
| 471 |
+
self.lists[name] = buffer_names
|
| 472 |
+
return name
|
| 473 |
+
|
| 474 |
+
def register_users_of(self, node_output):
|
| 475 |
+
def register(value):
|
| 476 |
+
if isinstance(value, (list, tuple)):
|
| 477 |
+
for x in value:
|
| 478 |
+
register(x)
|
| 479 |
+
if isinstance(value, ir.IRNode):
|
| 480 |
+
if (
|
| 481 |
+
not hasattr(value, "data")
|
| 482 |
+
or not isinstance(value.data, ir.IRNode)
|
| 483 |
+
or not isinstance(value.data.data, ir.IRNode)
|
| 484 |
+
):
|
| 485 |
+
return
|
| 486 |
+
|
| 487 |
+
for read_name in value.get_read_names():
|
| 488 |
+
self.name_to_users[read_name].append(value)
|
| 489 |
+
|
| 490 |
+
register(node_output)
|
| 491 |
+
|
| 492 |
+
def mark_buffer_mutated(self, name: str):
|
| 493 |
+
"""
|
| 494 |
+
When a buffer is mutated we need to make sure all the reads to
|
| 495 |
+
the old version are realized before the mutation happens.
|
| 496 |
+
"""
|
| 497 |
+
assert isinstance(name, str)
|
| 498 |
+
self.mutated_buffers.add(name)
|
| 499 |
+
|
| 500 |
+
if name not in self.name_to_users:
|
| 501 |
+
return
|
| 502 |
+
|
| 503 |
+
for user in self.name_to_users[name]:
|
| 504 |
+
user.realize()
|
| 505 |
+
|
| 506 |
+
def add_tensor_constant(self, data):
|
| 507 |
+
def allocate():
|
| 508 |
+
for name, value in self.constants.items():
|
| 509 |
+
if (
|
| 510 |
+
not data.is_mkldnn
|
| 511 |
+
and data.size() == value.size()
|
| 512 |
+
and data.stride() == value.stride()
|
| 513 |
+
and data.dtype == value.dtype
|
| 514 |
+
and data.device == value.device
|
| 515 |
+
and torch.eq(data, value).all()
|
| 516 |
+
):
|
| 517 |
+
return name
|
| 518 |
+
name = f"constant{len(self.constants)}"
|
| 519 |
+
self.constants[name] = data
|
| 520 |
+
self.constant_reprs[name] = hashlib.sha256(
|
| 521 |
+
repr(data).encode("utf-8")
|
| 522 |
+
).hexdigest()
|
| 523 |
+
return name
|
| 524 |
+
|
| 525 |
+
return TensorBox.create(
|
| 526 |
+
ir.ConstantBuffer(
|
| 527 |
+
allocate(),
|
| 528 |
+
FixedLayout(data.device, data.dtype, *self.static_sizes_strides(data)),
|
| 529 |
+
)
|
| 530 |
+
)
|
| 531 |
+
|
| 532 |
+
def constant_name(self, name: str, device_override: torch.device):
|
| 533 |
+
"""
|
| 534 |
+
We AOT copy constants to the devices they are needed on.
|
| 535 |
+
If device_override doesn't match the constant's device, then
|
| 536 |
+
copy it and return a different name.
|
| 537 |
+
"""
|
| 538 |
+
if self.constants[name].device == device_override or device_override is None:
|
| 539 |
+
return name
|
| 540 |
+
alt_name = f"{name}_{device_override.type}{device_override.index or 0}"
|
| 541 |
+
if alt_name not in self.constants:
|
| 542 |
+
self.constants[alt_name] = self.constants[name].to(device_override)
|
| 543 |
+
return alt_name
|
| 544 |
+
|
| 545 |
+
def placeholder(self, target: str, args, kwargs):
|
| 546 |
+
example = super().placeholder(target, args, kwargs)
|
| 547 |
+
if isinstance(example, SymTypes):
|
| 548 |
+
expr = example.node.expr
|
| 549 |
+
self.graph_inputs[target] = expr
|
| 550 |
+
return expr
|
| 551 |
+
elif isinstance(example, (int, bool, float)):
|
| 552 |
+
expr = sympy.sympify(example)
|
| 553 |
+
self.graph_inputs[target] = expr
|
| 554 |
+
return expr
|
| 555 |
+
assert isinstance(example, torch.Tensor), example
|
| 556 |
+
# todo(chilli): We can remove the last check once we turn buffers into
|
| 557 |
+
# static shape tensors. That's a hack to workaround Inductor believing
|
| 558 |
+
# the buffer should be static but us passing in a fake tensor with
|
| 559 |
+
# symbolic shapes.
|
| 560 |
+
if not example._has_symbolic_sizes_strides:
|
| 561 |
+
# the first N inputs are weights
|
| 562 |
+
sizes, strides = self.static_sizes_strides(example)
|
| 563 |
+
else:
|
| 564 |
+
sizes, strides = self.symbolic_sizes_strides(example)
|
| 565 |
+
# TODO(jansel): handle input aliasing
|
| 566 |
+
tensor = TensorBox.create(
|
| 567 |
+
InputBuffer(
|
| 568 |
+
target,
|
| 569 |
+
FixedLayout(example.device, example.dtype, sizes, strides),
|
| 570 |
+
)
|
| 571 |
+
)
|
| 572 |
+
self.graph_inputs[target] = tensor
|
| 573 |
+
self.graph_inputs_original[target] = tensor.data.data
|
| 574 |
+
self.device_types.add(example.device.type)
|
| 575 |
+
self.add_device_idx(example.device.index)
|
| 576 |
+
return tensor
|
| 577 |
+
|
| 578 |
+
def call_function(self, target, args, kwargs):
|
| 579 |
+
if target is operator.getitem and isinstance(args[0], (list, tuple)):
|
| 580 |
+
return super().call_function(target, args, kwargs)
|
| 581 |
+
|
| 582 |
+
if hasattr(target, "_inductor_lowering_function"):
|
| 583 |
+
# passthrough lowerings from .pattern_matcher
|
| 584 |
+
return target(*args, **kwargs)
|
| 585 |
+
|
| 586 |
+
if target not in lowerings:
|
| 587 |
+
base_name = target.name().split(".")[0]
|
| 588 |
+
if base_name in FALLBACK_ALLOW_LIST:
|
| 589 |
+
make_fallback(target)
|
| 590 |
+
elif config.implicit_fallbacks:
|
| 591 |
+
error = (
|
| 592 |
+
MissingOperatorWithDecomp
|
| 593 |
+
if get_decompositions([target])
|
| 594 |
+
else MissingOperatorWithoutDecomp
|
| 595 |
+
)
|
| 596 |
+
log.info(
|
| 597 |
+
"Creating implicit fallback for:\n%s",
|
| 598 |
+
error.operator_str(target, args, kwargs),
|
| 599 |
+
)
|
| 600 |
+
make_fallback(target)
|
| 601 |
+
elif get_decompositions([target]):
|
| 602 |
+
# There isn't a good way to dynamically patch this in
|
| 603 |
+
# since AOT Autograd already ran. The error message tells
|
| 604 |
+
# the user how to fix it.
|
| 605 |
+
raise MissingOperatorWithDecomp(target, args, kwargs)
|
| 606 |
+
else:
|
| 607 |
+
raise MissingOperatorWithoutDecomp(target, args, kwargs)
|
| 608 |
+
|
| 609 |
+
try:
|
| 610 |
+
out = lowerings[target](*args, **kwargs)
|
| 611 |
+
return out
|
| 612 |
+
except Exception as e:
|
| 613 |
+
raise LoweringException(e, target, args, kwargs).with_traceback(
|
| 614 |
+
e.__traceback__
|
| 615 |
+
) from None
|
| 616 |
+
|
| 617 |
+
def get_attr(self, target, args, kwargs):
|
| 618 |
+
# this is a constant
|
| 619 |
+
value = getattr(self.module, target)
|
| 620 |
+
|
| 621 |
+
if unsupported_output_tensor(value):
|
| 622 |
+
return self.add_tensor_constant(value)
|
| 623 |
+
|
| 624 |
+
with no_dispatch():
|
| 625 |
+
if value.shape == ():
|
| 626 |
+
return Constant(value.item(), value.dtype, value.device)
|
| 627 |
+
if len(value.shape) == 1 and value.shape[0] <= 8:
|
| 628 |
+
# tensor lowering has constant inlining logic
|
| 629 |
+
from .lowering import tensor
|
| 630 |
+
|
| 631 |
+
return tensor(value.tolist(), dtype=value.dtype, device=value.device)
|
| 632 |
+
|
| 633 |
+
return self.add_tensor_constant(value)
|
| 634 |
+
|
| 635 |
+
def call_module(self, target, args, kwargs):
|
| 636 |
+
raise AssertionError()
|
| 637 |
+
|
| 638 |
+
def call_method(self, target, args, kwargs):
|
| 639 |
+
raise AssertionError()
|
| 640 |
+
|
| 641 |
+
def output(self, target, args, kwargs):
|
| 642 |
+
result = super().output(target, args, kwargs)
|
| 643 |
+
assert isinstance(result, (tuple, list)), type(result)
|
| 644 |
+
assert all(
|
| 645 |
+
isinstance(
|
| 646 |
+
x,
|
| 647 |
+
(
|
| 648 |
+
TensorBox,
|
| 649 |
+
ir.Constant,
|
| 650 |
+
type(None),
|
| 651 |
+
ir.ConstantBuffer,
|
| 652 |
+
sympy.Expr,
|
| 653 |
+
sympy.logic.boolalg.Boolean,
|
| 654 |
+
int,
|
| 655 |
+
),
|
| 656 |
+
)
|
| 657 |
+
for x in result
|
| 658 |
+
), result
|
| 659 |
+
self.graph_outputs = [ir.ExternKernel.realize_input(x) for x in result]
|
| 660 |
+
value: ir.IRNode
|
| 661 |
+
for name, value in self.graph_inputs.items():
|
| 662 |
+
assert isinstance(value, (TensorBox, sympy.Expr))
|
| 663 |
+
if not isinstance(value, TensorBox):
|
| 664 |
+
continue
|
| 665 |
+
value.realize()
|
| 666 |
+
assert isinstance(value, TensorBox)
|
| 667 |
+
value = value.data
|
| 668 |
+
assert isinstance(value, ir.StorageBox)
|
| 669 |
+
value_storage_box = value
|
| 670 |
+
value = value.data
|
| 671 |
+
if not isinstance(value, InputBuffer) or value.get_name() != name:
|
| 672 |
+
# one of our inputs was mutated, need to turn that into a copy
|
| 673 |
+
ir.MutationLayout.realize_into(value, self.graph_inputs_original[name])
|
| 674 |
+
# replace output with mutated input
|
| 675 |
+
try:
|
| 676 |
+
ind = self.graph_outputs.index(value_storage_box)
|
| 677 |
+
self.graph_outputs[ind] = self.graph_inputs_original[name]
|
| 678 |
+
except ValueError:
|
| 679 |
+
pass
|
| 680 |
+
|
| 681 |
+
self.finalize()
|
| 682 |
+
log.debug(
|
| 683 |
+
"Force channels last inputs for %d conv for the current graph with id %d",
|
| 684 |
+
self.num_channels_last_conv,
|
| 685 |
+
self.graph_id,
|
| 686 |
+
)
|
| 687 |
+
|
| 688 |
+
def finalize(self):
|
| 689 |
+
for buf in self.buffers:
|
| 690 |
+
buf.decide_layout()
|
| 691 |
+
|
| 692 |
+
def run_node(self, n: torch.fx.Node):
|
| 693 |
+
origins = {n}
|
| 694 |
+
if n.op == "call_function":
|
| 695 |
+
args, kwargs = self.fetch_args_kwargs_from_env(n)
|
| 696 |
+
origins |= gather_origins(args, kwargs)
|
| 697 |
+
with ir.IRNode.current_origins(origins), self.set_current_node(n):
|
| 698 |
+
if (
|
| 699 |
+
n.op == "call_function"
|
| 700 |
+
and n.target is not operator.getitem
|
| 701 |
+
and fallback_node_due_to_unsupported_type(n)
|
| 702 |
+
):
|
| 703 |
+
result = fallback_handler(n.target, add_to_fallback_set=False)(
|
| 704 |
+
*args, **kwargs
|
| 705 |
+
)
|
| 706 |
+
elif n.op == "call_function" and n.target in layout_constraints:
|
| 707 |
+
args, kwargs = layout_constraints[n.target](n, *args, **kwargs)
|
| 708 |
+
result = self.call_function(n.target, args, kwargs)
|
| 709 |
+
elif n.target == torch.ops.aten.sym_stride:
|
| 710 |
+
# inductor graphs can occasionally return sizes/strides,
|
| 711 |
+
# e.g. if we need to save symints for the backward graph.
|
| 712 |
+
if isinstance(n.meta["val"], torch.SymInt):
|
| 713 |
+
result = n.meta["val"].node.expr
|
| 714 |
+
else:
|
| 715 |
+
result = super().run_node(n)
|
| 716 |
+
elif is_magic_method(n.target):
|
| 717 |
+
if isinstance(n.meta["val"], torch.SymInt):
|
| 718 |
+
result = n.meta["val"].node.expr
|
| 719 |
+
else:
|
| 720 |
+
result = super().run_node(n)
|
| 721 |
+
else:
|
| 722 |
+
result = super().run_node(n)
|
| 723 |
+
|
| 724 |
+
# require the same stride order for dense outputs,
|
| 725 |
+
# 1. user-land view() will not throw because inductor
|
| 726 |
+
# output different strides than eager
|
| 727 |
+
# long term the solution is to make view() always succeed
|
| 728 |
+
# with infallible strides.
|
| 729 |
+
# 2: as_strided ops, we need make sure its input has same size/stride with
|
| 730 |
+
# eager model to align with eager behavior.
|
| 731 |
+
as_strided_ops = [
|
| 732 |
+
torch.ops.aten.as_strided.default,
|
| 733 |
+
torch.ops.aten.as_strided_.default,
|
| 734 |
+
torch.ops.aten.as_strided_scatter.default,
|
| 735 |
+
]
|
| 736 |
+
is_output = any(user.op == "output" for user in n.users)
|
| 737 |
+
is_input_for_as_strided = any(
|
| 738 |
+
user.target in as_strided_ops for user in n.users
|
| 739 |
+
)
|
| 740 |
+
if (is_output or is_input_for_as_strided) and isinstance(
|
| 741 |
+
n.meta["val"], torch.Tensor
|
| 742 |
+
):
|
| 743 |
+
strides = n.meta["val"].stride()
|
| 744 |
+
dense = torch._prims_common.is_non_overlapping_and_dense(n.meta["val"])
|
| 745 |
+
# requiring a stride order for a non-dense output wouldn't
|
| 746 |
+
# recreate the same strides, and would fail with view, defer for now.
|
| 747 |
+
if dense and len(strides):
|
| 748 |
+
stride_order = ir.get_stride_order(strides)
|
| 749 |
+
if (
|
| 750 |
+
len(result.get_size()) == 4
|
| 751 |
+
and n in self.nodes_prefer_channels_last
|
| 752 |
+
and n.name not in self.user_visible_outputs
|
| 753 |
+
and not is_input_for_as_strided
|
| 754 |
+
):
|
| 755 |
+
stride_order = ir.NHWC_STRIDE_ORDER
|
| 756 |
+
result = ir.ExternKernel.require_stride_order(result, stride_order)
|
| 757 |
+
|
| 758 |
+
# Realize if (1) any user need inputs realized, or (2) there is
|
| 759 |
+
# already too many reads and rematerializing can be bad.
|
| 760 |
+
num_users = len(set(n.users))
|
| 761 |
+
if num_users > 1 and isinstance(result, TensorBox):
|
| 762 |
+
for user in n.users:
|
| 763 |
+
if user.target in needs_realized_inputs:
|
| 764 |
+
result.realize_hint()
|
| 765 |
+
# This inclusion is somewhat controversial (from
|
| 766 |
+
# discussion between Horace, Natalia, and Elias).
|
| 767 |
+
# Currently, it's not very clear why this is helpful.
|
| 768 |
+
# The general idea here is that even though a node may
|
| 769 |
+
# have FlexibleLayout, we still often *treat* it as if
|
| 770 |
+
# it was contiguous. This appears to sometimes result in
|
| 771 |
+
# suboptimal behavior.
|
| 772 |
+
#
|
| 773 |
+
# When we do a better job selecting layout, we should
|
| 774 |
+
# revisit this.
|
| 775 |
+
need_fixed_layout = [
|
| 776 |
+
torch.ops.aten.convolution_backward.default,
|
| 777 |
+
torch.ops.aten.mm.default,
|
| 778 |
+
torch.ops.aten._int_mm.default,
|
| 779 |
+
]
|
| 780 |
+
if not self.layout_opt:
|
| 781 |
+
need_fixed_layout.append(torch.ops.aten.convolution.default)
|
| 782 |
+
if torch._C._has_mkldnn:
|
| 783 |
+
need_fixed_layout += [
|
| 784 |
+
torch.ops.mkldnn._convolution_pointwise.default,
|
| 785 |
+
torch.ops.mkldnn._convolution_pointwise.binary,
|
| 786 |
+
torch.ops.mkldnn._convolution_pointwise_.binary,
|
| 787 |
+
torch.ops.mkldnn._convolution_transpose_pointwise.default,
|
| 788 |
+
torch.ops.mkldnn._linear_pointwise.default,
|
| 789 |
+
torch.ops.mkldnn._linear_pointwise.binary,
|
| 790 |
+
torch.ops.aten.mkldnn_rnn_layer.default,
|
| 791 |
+
torch.ops.onednn.qconv2d_pointwise.default,
|
| 792 |
+
torch.ops.onednn.qconv2d_pointwise.binary,
|
| 793 |
+
torch.ops.onednn.qlinear_pointwise.default,
|
| 794 |
+
]
|
| 795 |
+
if torch._C.has_mkl:
|
| 796 |
+
need_fixed_layout += [torch.ops.mkl._mkl_linear.default]
|
| 797 |
+
if user.target in need_fixed_layout:
|
| 798 |
+
result = ir.ExternKernel.require_stride_order(
|
| 799 |
+
result, ir.get_stride_order(n.meta["val"].stride())
|
| 800 |
+
)
|
| 801 |
+
if user.op == "output":
|
| 802 |
+
if isinstance(result.data.data, (Pointwise, Reduction)):
|
| 803 |
+
result.realize()
|
| 804 |
+
|
| 805 |
+
# TODO(jansel): introduce a store vs inline choice
|
| 806 |
+
result.mark_reuse(len(n.users))
|
| 807 |
+
|
| 808 |
+
# Realize if the IRNode already has accumulated lots of reads
|
| 809 |
+
if isinstance(result, TensorBox) and result.has_exceeded_max_reads():
|
| 810 |
+
# Prevent excessive accumulation in a computed buffer, when
|
| 811 |
+
# there are multiple branches each with small number of memory
|
| 812 |
+
# reads, but they converge to a user.
|
| 813 |
+
result.realize_hint()
|
| 814 |
+
|
| 815 |
+
# This is not complete, but it doesn't have to be: origin_node
|
| 816 |
+
# tracking is best effort. The logic here critically relies on direct
|
| 817 |
+
# TensorBox -> StorageBox denoting a non-view; we don't bother trying
|
| 818 |
+
# to get views to work. Feel free to add any extra cases as needed.
|
| 819 |
+
#
|
| 820 |
+
# Note: we can't YOLO tree_map over this result, because if there are
|
| 821 |
+
# buffers or a view involved, we might not be able to validly assign
|
| 822 |
+
# the origin_node here.
|
| 823 |
+
if isinstance(result, TensorBox) and isinstance(result.data, ir.StorageBox):
|
| 824 |
+
if isinstance(result.data.data, ir.Loops):
|
| 825 |
+
result.data.data.origin_node = n
|
| 826 |
+
elif isinstance(result.data.data, ir.Buffer):
|
| 827 |
+
result.data.data.origin_node = n
|
| 828 |
+
if isinstance(result.data.data, ir.ComputedBuffer) and isinstance(
|
| 829 |
+
result.data.data.data, ir.Loops
|
| 830 |
+
):
|
| 831 |
+
result.data.data.data.origin_node = n
|
| 832 |
+
# Not really multi-output, can straightforwardly recurse in
|
| 833 |
+
elif (
|
| 834 |
+
isinstance(result.data.data, ir.MultiOutput)
|
| 835 |
+
and not result.data.data.indices
|
| 836 |
+
):
|
| 837 |
+
if isinstance(result.data.data.inputs[0], ir.Buffer):
|
| 838 |
+
result.data.data.inputs[0].origin_node = n
|
| 839 |
+
|
| 840 |
+
self.register_users_of(result)
|
| 841 |
+
|
| 842 |
+
return result
|
| 843 |
+
|
| 844 |
+
def check_cpp_codegen_disabled(self):
|
| 845 |
+
if config.disable_cpp_codegen:
|
| 846 |
+
self.disable_cpp_wrapper("cpp codegen disabled")
|
| 847 |
+
|
| 848 |
+
def check_platform(self):
|
| 849 |
+
if sys.platform != "linux":
|
| 850 |
+
self.disable_cpp_wrapper("platform not linux")
|
| 851 |
+
|
| 852 |
+
def check_input_for_cpp_buffer(self):
|
| 853 |
+
for value in self.graph_inputs.values():
|
| 854 |
+
dtype = None
|
| 855 |
+
if isinstance(value, TensorBox):
|
| 856 |
+
dtype = value.get_dtype()
|
| 857 |
+
elif isinstance(
|
| 858 |
+
value, (sympy.Symbol, sympy.Expr, sympy.core.numbers.Integer)
|
| 859 |
+
):
|
| 860 |
+
dtype = may_get_constant_buffer_dtype(value)
|
| 861 |
+
|
| 862 |
+
if not supported_dtype_of_cpp_wrapper(dtype, self.cuda):
|
| 863 |
+
self.disable_cpp_wrapper("unsupported inputs dtype")
|
| 864 |
+
|
| 865 |
+
@contextmanager
|
| 866 |
+
def set_current_node(self, node: torch.fx.Node):
|
| 867 |
+
old = self.current_node
|
| 868 |
+
try:
|
| 869 |
+
self.current_node = node
|
| 870 |
+
yield
|
| 871 |
+
finally:
|
| 872 |
+
self.current_node = old
|
| 873 |
+
|
| 874 |
+
def check_cpp_wrapper(self):
|
| 875 |
+
self.check_cpp_codegen_disabled()
|
| 876 |
+
self.check_platform()
|
| 877 |
+
self.check_input_for_cpp_buffer()
|
| 878 |
+
|
| 879 |
+
def init_wrapper_code(self):
|
| 880 |
+
self.cuda = "cuda" in self.device_types
|
| 881 |
+
if self.cpp_wrapper:
|
| 882 |
+
self.check_cpp_wrapper()
|
| 883 |
+
# Re-check self.cpp_wrapper because it might be disabled due to failed checking
|
| 884 |
+
if self.cuda:
|
| 885 |
+
assert self.cpp_wrapper, "CudaWrapperCodeGen hit unsupported case"
|
| 886 |
+
|
| 887 |
+
if self.cpp_wrapper:
|
| 888 |
+
self.wrapper_code = (
|
| 889 |
+
CudaWrapperCodeGen() if self.cuda else CppWrapperCodeGen()
|
| 890 |
+
)
|
| 891 |
+
return
|
| 892 |
+
|
| 893 |
+
device_types = self.device_types.copy()
|
| 894 |
+
# In terms of some operations that don't have input tensors, we need to
|
| 895 |
+
# check the deivce of the buffers.
|
| 896 |
+
for buffer in self.buffers:
|
| 897 |
+
device_types.add(buffer.get_device().type)
|
| 898 |
+
device_types.discard("cpu")
|
| 899 |
+
# TODO(Eikan): Only support mixing cpu and other device now.
|
| 900 |
+
assert len(device_types) <= 1, "Does not support mixing {}".format(
|
| 901 |
+
"+".join(device_types)
|
| 902 |
+
)
|
| 903 |
+
only_cpu = len(device_types) == 0
|
| 904 |
+
device_type = "cpu" if only_cpu else device_types.pop()
|
| 905 |
+
wrapper_code_gen_cls = get_wrapper_codegen_for_device(device_type)
|
| 906 |
+
self.wrapper_code = wrapper_code_gen_cls()
|
| 907 |
+
|
| 908 |
+
def codegen(self):
|
| 909 |
+
from .scheduler import Scheduler
|
| 910 |
+
|
| 911 |
+
self.init_wrapper_code()
|
| 912 |
+
|
| 913 |
+
self.scheduler = Scheduler(self.buffers)
|
| 914 |
+
assert self.scheduler is not None # mypy can't figure this out
|
| 915 |
+
self.scheduler.codegen()
|
| 916 |
+
assert self.wrapper_code is not None
|
| 917 |
+
return self.wrapper_code.generate()
|
| 918 |
+
|
| 919 |
+
def count_bytes(self):
|
| 920 |
+
from .scheduler import Scheduler
|
| 921 |
+
|
| 922 |
+
scheduler = Scheduler(self.buffers)
|
| 923 |
+
|
| 924 |
+
total_bytes = 0
|
| 925 |
+
node_counts = []
|
| 926 |
+
node_runtimes = []
|
| 927 |
+
for node in scheduler.nodes:
|
| 928 |
+
num_bytes = node.get_read_write_buffers_sizes()
|
| 929 |
+
total_bytes += num_bytes
|
| 930 |
+
node_counts.append((node, num_bytes // 4))
|
| 931 |
+
node_runtimes.append((node, node.get_estimated_runtime()))
|
| 932 |
+
return total_bytes, node_counts, node_runtimes
|
| 933 |
+
|
| 934 |
+
@dynamo_timed
|
| 935 |
+
def compile_to_module(self):
|
| 936 |
+
from .codecache import PyCodeCache
|
| 937 |
+
|
| 938 |
+
code, linemap = self.codegen()
|
| 939 |
+
linemap = [(line_no, node.stack_trace) for line_no, node in linemap]
|
| 940 |
+
key, path = PyCodeCache.write(code)
|
| 941 |
+
mod = PyCodeCache.load_by_key_path(key, path, linemap=linemap)
|
| 942 |
+
self.cache_key = key
|
| 943 |
+
self.cache_path = path
|
| 944 |
+
self.cache_linemap = linemap
|
| 945 |
+
|
| 946 |
+
for name, value in self.constants.items():
|
| 947 |
+
setattr(mod, name, value)
|
| 948 |
+
|
| 949 |
+
# Logged twice as per https://github.com/pytorch/pytorch/pull/99038#discussion_r1167826029
|
| 950 |
+
# TODO. Revisit this once the logging API is more mature
|
| 951 |
+
output_code_log.info("Output code written to: %s", mod.__file__)
|
| 952 |
+
log.debug("Output code written to: %s", mod.__file__)
|
| 953 |
+
output_code_log.debug("Output code: \n%s", code)
|
| 954 |
+
if config.benchmark_kernel:
|
| 955 |
+
print(f"Compiled module path: {mod.__file__}", file=sys.stderr)
|
| 956 |
+
V.debug.output_code(mod.__file__)
|
| 957 |
+
V.debug.copy(os.path.splitext(mod.__file__)[0] + ".debug")
|
| 958 |
+
return mod
|
| 959 |
+
|
| 960 |
+
def compile_to_fn(self):
    """Return a callable (or artifact path) for the compiled graph.

    In AOT mode with the C++ wrapper the code is compiled ahead of time and
    the compiled artifact's path is returned; otherwise the graph is
    compiled to a Python module and its ``call`` entry point is returned.
    """
    if not (self.aot_mode and self.cpp_wrapper):
        return self.compile_to_module().call

    from .codecache import AotCodeCache

    code, linemap = self.codegen()
    output_code_log.debug("Output code: \n%s", code)

    # Directly return the file path with the compiled code
    return AotCodeCache.compile(self, code, cuda=self.cuda)
|
| 971 |
+
|
| 972 |
+
def get_output_names(self):
    """Names of the graph outputs, skipping None/shape placeholder buffers."""
    assert self.graph_outputs is not None
    placeholder_types = (ir.NoneAsConstantBuffer, ir.ShapeAsConstantBuffer)
    return [
        out.get_name()
        for out in self.graph_outputs
        if not isinstance(out, placeholder_types)
    ]
|
| 980 |
+
|
| 981 |
+
def is_unspec_arg(self, name: str):
    """True when *name* is a graph input that holds a single CPU element.

    dynamo wraps unspec variables as 0d CPU tensors; such inputs need to be
    converted back to scalars during codegen (triton only).
    """
    if name not in self.graph_inputs:
        return False
    buf = self.graph_inputs[name]
    return buf.get_numel() == 1 and buf.get_device().type == "cpu"
|
llava_next/lib/python3.10/site-packages/torch/_inductor/hooks.py
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import contextlib

# Executed in the order they're registered
INTERMEDIATE_HOOKS = []


@contextlib.contextmanager
def intermediate_hook(fn):
    """Register *fn* for the duration of the ``with`` block (LIFO removal)."""
    INTERMEDIATE_HOOKS.append(fn)
    try:
        yield
    finally:
        INTERMEDIATE_HOOKS.pop()


def run_intermediate_hooks(name, val):
    """Invoke every registered hook with ``(name, val)``.

    The registry is swapped out while the hooks run so that a hook which
    itself triggers intermediate values does not recurse; it is restored
    afterwards even if a hook raises.
    """
    global INTERMEDIATE_HOOKS
    active, INTERMEDIATE_HOOKS = INTERMEDIATE_HOOKS, []
    try:
        for hook in active:
            hook(name, val)
    finally:
        INTERMEDIATE_HOOKS = active
|
llava_next/lib/python3.10/site-packages/torch/_inductor/inductor_prims.py
ADDED
|
@@ -0,0 +1,73 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
|
| 3 |
+
import torch
|
| 4 |
+
from torch import _prims
|
| 5 |
+
|
| 6 |
+
log = logging.getLogger(__name__)
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def make_prim(
    schema,
    impl_aten,
    return_type=_prims.RETURN_TYPE.NEW,
    doc="",
    tags=None,
):
    """Register a custom prim for *schema*.

    The meta function is derived by running *impl_aten* on the meta inputs
    and wrapping the result in a TensorMeta.
    """
    return _prims._make_prim(
        schema=schema,
        return_type=return_type,
        meta=lambda *args, **kwargs: _prims.TensorMeta(impl_aten(*args, **kwargs)),
        impl_aten=impl_aten,
        doc=doc,
        tags=tags,
    )
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def eager_force_stride(input_tensor, stride):
    """Return a tensor equal to *input_tensor* with exactly *stride*.

    No-op when the tensor already has the requested stride; otherwise a new
    strided tensor is allocated and the data copied into it.
    """
    if input_tensor.stride() == stride:
        return input_tensor
    restrided = input_tensor.clone().as_strided(input_tensor.shape, stride)
    restrided.copy_(input_tensor)
    return restrided
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
# Custom prims used for handling randomness
seed = make_prim(
    "inductor_seed(Device device) -> Tensor",
    lambda dev: torch.randint(2**63 - 1, [], device=dev),
    doc="create a fresh seed (one per call) for use with inductor_rand",
    tags=(torch.Tag.nondeterministic_seeded,),
)
seeds = make_prim(
    "inductor_seeds(int count, Device device) -> Tensor",
    lambda n, dev: torch.randint(2**63 - 1, [n], device=dev),
    doc="Horizontally fusion of many inductor_seed() calls",
    tags=(torch.Tag.nondeterministic_seeded,),
)
lookup_seed = make_prim(
    # if inductor_lookup_seed changes, update partitioners.py
    "inductor_lookup_seed(Tensor seeds, int index) -> Tensor",
    lambda seed_tensor, i: seed_tensor[i],
    doc="Extract a single seed from the result of inductor_seeds()",
)
random = make_prim(
    "inductor_random(SymInt[] size, Tensor seed, str mode) -> Tensor",
    # `mode` selects torch.rand or torch.randn; the seed only carries the device.
    lambda size, seed, mode: getattr(torch, mode)(size, device=seed.device),
    doc="torch.rand()/torch.randn() using backend-specific RNG that can be fused",
)
randint = make_prim(
    "inductor_randint(SymInt low, SymInt high, SymInt[] size, Tensor seed) -> Tensor",
    lambda lo, hi, size, seed: torch.randint(lo, hi, size, device=seed.device),
    doc="torch.randint() using backend-specific RNG that can be fused",
)
force_stride_order = make_prim(
    "inductor_force_stride_order(Tensor input, SymInt[] stride) -> Tensor",
    eager_force_stride,
    doc="Force the stride order for input tensor. No-op if the input tensor already has the stride. Do a copy otherwise",
)
|
llava_next/lib/python3.10/site-packages/torch/_inductor/lowering.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
llava_next/lib/python3.10/site-packages/torch/_inductor/metrics.py
ADDED
|
@@ -0,0 +1,52 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import List, Tuple, TYPE_CHECKING, Union

# Prevent circular import
if TYPE_CHECKING:
    from torch._inductor.scheduler import (
        BaseSchedulerNode,
        ExternKernelSchedulerNode,
        NopKernelSchedulerNode,
        SchedulerNode,
    )

# counter for tracking how many kernels have been generated
generated_kernel_count = 0
generated_cpp_vec_kernel_count = 0
num_bytes_accessed = 0
# Annotations are strings because the scheduler types are only imported
# under TYPE_CHECKING (avoids the circular import at runtime).
nodes_num_elem: "List[Tuple[Union[NopKernelSchedulerNode, SchedulerNode, ExternKernelSchedulerNode], int]]" = []
node_runtimes: "List[Tuple[BaseSchedulerNode, float]]" = []

# counters for tracking fusions
ir_nodes_pre_fusion = 0

# counters for tracking to_dtype inserted
cpp_to_dtype_count = 0

# counters for tracking cpp_wrapper disabled
disable_cpp_wrapper = 0


def reset():
    """Restore every metrics counter and list in this module to its initial state."""
    global generated_kernel_count, generated_cpp_vec_kernel_count
    global num_bytes_accessed
    global ir_nodes_pre_fusion, cpp_to_dtype_count, disable_cpp_wrapper

    generated_kernel_count = 0
    generated_cpp_vec_kernel_count = 0
    num_bytes_accessed = 0
    # Lists are cleared in place so external references stay valid.
    nodes_num_elem.clear()
    node_runtimes.clear()
    ir_nodes_pre_fusion = 0
    cpp_to_dtype_count = 0
    disable_cpp_wrapper = 0
|
llava_next/lib/python3.10/site-packages/torch/_inductor/optimize_indexing.py
ADDED
|
@@ -0,0 +1,118 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import math
|
| 2 |
+
|
| 3 |
+
import sympy
|
| 4 |
+
|
| 5 |
+
import torch
|
| 6 |
+
from torch.utils._sympy.value_ranges import ValueRanges
|
| 7 |
+
from .ir import LoopBody
|
| 8 |
+
from .utils import dominated_nodes
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def val_expressable_in_32_bits(val):
    """Return True when *val* fits in 32-bit arithmetic.

    Integers are checked against the int32 range; floats against the float32
    mantissa (+/- 2**24), so conversion to 32 bits is exact. Booleans (sympy
    or Python) always fit. Accepts Python ints/floats/bools and constant
    sympy expressions; anything else raises.
    """
    if getattr(val, "is_Boolean", False):
        return True

    # Plain Python scalars are checked directly (no sympy involvement).
    if isinstance(val, float):
        # bound within mantissa so the float32 value is exact
        return -(2**24) <= val <= 2**24
    if isinstance(val, int):
        iinfo = torch.iinfo(torch.int32)
        return iinfo.min <= val <= iinfo.max

    if isinstance(val, sympy.Expr):
        assert val.is_constant()
        # is_Boolean was handled above, so only Integer vs. everything-else
        # matters here (the original `or val.is_Boolean` branch was dead code).
        as_py = int(val) if val.is_Integer else float(val)
        return val_expressable_in_32_bits(as_py)

    raise Exception(f"Unexpected value {val}")
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def range_expressable_in_32_bits(range):
    """True when both endpoints of *range* fit in 32-bit arithmetic."""
    return all(
        val_expressable_in_32_bits(bound) for bound in (range.lower, range.upper)
    )
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def try_to_reduce_precision(node, bounds, indirect_vars, indices, replacement_vals):
    """Downcast a ``to_dtype(..., int64)`` node to int32 when safe.

    Bails out (returns without changing anything) unless every dominated use
    of *node* — and every indexing expression reachable through indirect
    variables it sets — provably stays within int32 bounds.
    """

    # if a downstream use of a node explicitly converts to int32, or float16/float32/float64,
    # then it's precision is set for that chain of uses, and we don't need to consider those
    # dominated values
    def skip_filter(n):
        return n.target == "to_dtype" and n.args[2] in (
            torch.int32,
            torch.float32,
            torch.float64,
        )

    # TODO - there are dominated uses whose dtype does not depend on whether
    # we reduce the precision here, e.g. add(int64, int64) one of the args can be reduced to
    # int32 without changing the output precision of the node. this case hasn't shown up
    for use in dominated_nodes([node], skip_filter):
        if use.target in ["store", "output"]:
            continue

        if "set_indirect" in use.target:
            idx = int(use.target[len("set_indirect") :])
            indirect_var = indirect_vars[idx]

            # We check that we can compute all the indices it's involved in with int32
            for index, expr in indices.items():
                if indirect_var in expr.free_symbols:
                    index_range = replacement_vals[index]

                    if math.isinf(index_range.lower) or math.isinf(index_range.upper):
                        return

                    # all indices are integers, so make sure that we
                    # use the bounds of integers instead of floats.
                    # TODO - not sure if we should be doing int/float casts while tracing,
                    # might interfere with sympy.
                    int_range = ValueRanges(
                        int(index_range.lower), int(index_range.upper)
                    )
                    if not range_expressable_in_32_bits(int_range):
                        return

        if not range_expressable_in_32_bits(bounds[use]):
            return

    # Every dominated use fits: rewrite this conversion's dtype argument.
    new_args = list(node.args)
    new_args[2] = torch.int32
    node.args = tuple(new_args)
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
def indexing_dtype_strength_reduction(loop_body: LoopBody):
    """
    Performs Value Range Analysis on LoopBody's fx graph to reduce precision of
    intermediaries from int64 to int32
    """
    analysis = loop_body.bounds()

    # Candidate conversions: to_dtype(..., int64) nodes whose bounds are known.
    candidates = [
        n
        for n in loop_body.get_nodes()
        if n.target == "to_dtype"
        and n.args[2] == torch.int64
        and n not in analysis.unbounded_vars
    ]
    if not candidates:
        return

    node_bounds = analysis.get_bounds()

    # TODO - if dominated node of one to_dtype is not expressible in int32,
    # we should short circuit another to_dtype node if that node also dominates
    for candidate in candidates:
        try_to_reduce_precision(
            candidate,
            node_bounds,
            loop_body.indirect_vars,
            loop_body.indexing_exprs,
            analysis.replacement_vals,
        )
|
llava_next/lib/python3.10/site-packages/torch/_inductor/pattern_matcher.py
ADDED
|
@@ -0,0 +1,1169 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import dataclasses
|
| 2 |
+
import functools
|
| 3 |
+
import inspect
|
| 4 |
+
import itertools
|
| 5 |
+
import logging
|
| 6 |
+
import os
|
| 7 |
+
import re
|
| 8 |
+
from collections import defaultdict
|
| 9 |
+
from typing import Any, Callable, Dict, List, Optional, Union
|
| 10 |
+
|
| 11 |
+
import torch
|
| 12 |
+
import torch._guards
|
| 13 |
+
import torch.fx
|
| 14 |
+
import torch.utils._pytree as pytree
|
| 15 |
+
from torch._dynamo.utils import counters
|
| 16 |
+
from torch._prims_common import is_integer_dtype
|
| 17 |
+
from torch.fx import Node
|
| 18 |
+
from torch.fx.experimental.proxy_tensor import make_fx, maybe_disable_fake_tensor_mode
|
| 19 |
+
from torch.fx.immutable_collections import immutable_dict, immutable_list
|
| 20 |
+
|
| 21 |
+
from .._functorch import config as functorch_config
|
| 22 |
+
from .._functorch.aot_autograd import aot_function, make_boxed_func
|
| 23 |
+
from .._functorch.partitioners import default_partition
|
| 24 |
+
from .._subclasses import FakeTensorMode
|
| 25 |
+
from ..fx import Transformer
|
| 26 |
+
from . import config
|
| 27 |
+
from .decomposition import select_decomp_table
|
| 28 |
+
from .lowering import fallback_node_due_to_unsupported_type
|
| 29 |
+
|
| 30 |
+
log = logging.getLogger(__name__)
|
| 31 |
+
aten = torch.ops.aten
|
| 32 |
+
prims = torch.ops.prims
|
| 33 |
+
|
| 34 |
+
Constant = Any
|
| 35 |
+
NodeOrConstant = Union[Constant, torch.fx.Node]
|
| 36 |
+
|
| 37 |
+
# Sentinel indicating multiple quantities can be matched
|
| 38 |
+
MULTIPLE = object()
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
class Match:
    """
    Represents a successfully matched pattern.
    """

    def __init__(self, pattern, args=None, kwargs=None):
        super().__init__()
        self.pattern = pattern
        # The input nodes that must be passed in to the result
        self.args = args or []
        self.kwargs = kwargs or {}
        # The nodes matched in this expression
        self.nodes = []
        # Mapping CallFunction to the node.target
        self.targets = {}
        self.ctx: "MatchContext" = None

    @property
    def graph(self):
        return self.ctx.graph

    def extend(self, other):
        """Merge another Match into this one; conflicting kwargs raise FailedMatch."""
        if self.kwargs:
            for key in self.kwargs.keys() & other.kwargs.keys():
                if self.kwargs[key] != other.kwargs[key]:
                    raise FailedMatch(f"kwarg mismatch: {key}")
        self.args.extend(other.args)
        self.nodes.extend(other.nodes)
        self.kwargs.update(other.kwargs)
        self.targets.update(other.targets)

    def bundle(self):
        # Wrap args in an extra list
        self.args = [tuple(self.args)] if self.args else []
        return self

    def __repr__(self):
        return f"Match(..., {self.args}, {self.kwargs})"

    def erase_nodes(self, graph: torch.fx.Graph):
        """Erase every matched node (in reverse order) that is still alive."""
        for matched in reversed(self.nodes):
            if not matched._erased:
                graph.erase_node(matched)

    def output_nodes(self):
        """Matched nodes for the pattern's outputs (None for absent outputs)."""
        return [
            None if pat is None else self.ctx.pattern_to_node[pat]
            for pat in self.ctx.outputs
        ]

    def output_node(self):
        # First truthy output; raises IndexError when there is none.
        return [p for p in self.output_nodes() if p][0]

    def replace_with_graph(self, replacement_graph, args):
        ReplacementPatternEntry.replace_with_graph(
            self, self.ctx.graph, replacement_graph, args
        )

    def replace_by_example(self, replacement_fn, args, trace_fn=None):
        """Trace *replacement_fn* on the matched example values and splice it in."""
        tracer = trace_fn if trace_fn is not None else inference_graph
        replacement = tracer(
            replacement_fn, torch.fx.map_arg(args, lambda arg: arg.meta["val"])
        )
        ReplacementPatternEntry.replace_with_graph(
            self,
            self.ctx.graph,
            replacement,
            args,
        )
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
class FailedMatch(RuntimeError):
    """Raised (or returned) when a pattern fails to match; always falsy so
    callers can write ``if not match: ...``."""

    def __bool__(self):
        return False
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
class MatchContext:
    """
    State needed while running PatternExpr._match().
    """

    def __init__(
        self,
        outputs: "List[PatternExpr]",
        pattern_to_node: "Optional[Dict[PatternExpr, Node]]" = None,
        *,
        graph: "torch.fx.Graph",
    ):
        self.outputs = outputs
        # None marks a pattern that was tried and failed.
        self.pattern_to_node = {} if pattern_to_node is None else pattern_to_node
        self.graph = graph
        self.exclusive_node_set = []

    def match(self, pattern, node):
        """wrapper to check reused nodes in patterns"""
        if pattern in self.pattern_to_node:
            # Seen before: the pattern must bind to the very same node.
            if self.pattern_to_node[pattern] == node:
                return Match(pattern)  # already checked this node
            return FailedMatch("repeated pattern differs")
        m = pattern._match(node, self)
        assert pattern not in self.pattern_to_node
        self.pattern_to_node[pattern] = node if m else None
        m.ctx = self
        return m

    def filter_multi_user_patterns(self):
        """Patterns that matched a real node and allow multiple users."""
        return {
            pat: matched
            for pat, matched in self.pattern_to_node.items()
            if pat.has_multiple_users() and matched is not None
        }
|
| 156 |
+
|
| 157 |
+
|
| 158 |
+
class PatternExpr:
    """
    Base class for types of patterns
    """

    def _match(
        self, node: torch.fx.Node, ctx: MatchContext
    ) -> Union[Match, FailedMatch]:
        # Subclasses implement the actual matching logic.
        raise NotImplementedError()

    def match(self, node: torch.fx.Node) -> Union[Match, FailedMatch]:
        """Match against *node*; a FailedMatch is returned rather than raised."""
        try:
            return MatchContext([self], graph=node.graph).match(self, node)
        except FailedMatch as e:
            return e

    def has_multiple_users(self) -> bool:
        # Leaf patterns bind a single use by default.
        return False

    def __repr__(self):
        return f"{type(self).__name__}()"

    def find_anchor_nodes(self, ctx: MatchContext, searched):
        # Yields the node this pattern already bound, if any.
        if self in ctx.pattern_to_node:
            yield ctx.pattern_to_node[self]
|
| 183 |
+
|
| 184 |
+
|
| 185 |
+
class Arg(PatternExpr):
    """
    Capture an arg which will become an input to the handler. Args are
    passed in depth first order.
    """

    def _match(self, node: NodeOrConstant, ctx: MatchContext):
        # Anything matches; record it as a positional arg.
        return Match(self, args=[node])
|
| 193 |
+
|
| 194 |
+
|
| 195 |
+
class Ignored(PatternExpr):
    """
    Match an arg, but don't pass it to handler
    """

    def _match(self, node: NodeOrConstant, ctx: MatchContext):
        # Anything matches; nothing is captured.
        return Match(self)

    def __repr__(self):
        return "*"
|
| 205 |
+
|
| 206 |
+
|
| 207 |
+
class KeywordArg(PatternExpr):
    """
    Capture a kwarg which will become an input to the handler.
    """

    def __init__(self, name):
        super().__init__()
        self.name = name

    def __repr__(self):
        return f"KeywordArg({self.name!r})"

    def _match(self, node: NodeOrConstant, ctx: MatchContext):
        # Anything matches; it is captured under this keyword.
        return Match(self, kwargs={self.name: node})
|
| 221 |
+
|
| 222 |
+
|
| 223 |
+
class ExclusiveKeywordArg(PatternExpr):
    """
    Capture a kwarg which will become an input to the handler.
    Unlike KeywordArg, the same node may only be captured once per match.
    """

    def __init__(self, name):
        super().__init__()
        self.name = name

    def __repr__(self):
        return f"ExclusiveKeywordArg({self.name!r})"

    def _match(self, node: NodeOrConstant, ctx: MatchContext):
        if node in ctx.exclusive_node_set:
            return FailedMatch("exclusive arg appears twice")

        ctx.exclusive_node_set.append(node)
        # Anything (unseen) matches; captured under this keyword.
        return Match(self, kwargs={self.name: node})
|
| 241 |
+
|
| 242 |
+
|
| 243 |
+
class _TargetExpr(PatternExpr):
    """
    Base class for filtering match by node.target
    """

    # Subclasses set this to the fx op kind they match ("call_function", ...).
    op = None

    def __init__(self, fns, users=1):
        if not self.op:
            raise NotImplementedError("Shouldn't directly use _BaseNodeMatch")
        super().__init__()
        fns = [fns] if callable(fns) or isinstance(fns, str) else list(fns)
        # Expand OpOverloadPackets into their individual overloads as well.
        for fn in list(fns):
            if isinstance(fn, torch._ops.OpOverloadPacket):
                fns.extend(getattr(fn, overload) for overload in fn.overloads())

        self.fns = fns
        self.fns_set = set(fns)
        self.users = users

    def fns_repr(self):
        first = self.fns[0].__name__
        return f"[{first}, ...]" if len(self.fns) > 1 else first

    def __repr__(self):
        return f"{self.__class__.__name__}({self.fns_repr()})"

    def has_multiple_users(self) -> bool:
        return self.users is MULTIPLE or self.users > 1

    def find_anchor_nodes(self, ctx: MatchContext, searched):
        raise NotImplementedError()

    def _match_fns(self, node: torch.fx.Node):
        # Node must be a real fx node of our op kind targeting one of our fns.
        return (
            isinstance(node, torch.fx.Node)
            and node.op == self.op
            and node.target in self.fns_set
        )

    def _match_users(self, node: torch.fx.Node, ctx: MatchContext):
        # Output patterns may have any number of users; otherwise the exact
        # user count must match (or MULTIPLE was requested).
        return (
            self in ctx.outputs
            or self.users is MULTIPLE
            or len(node.users) == self.users
        )
|
| 292 |
+
|
| 293 |
+
|
| 294 |
+
class _TargetArgsExpr(_TargetExpr):
    """
    Base class for filtering match by node.{target,args,kwargs}
    """

    def __init__(self, fns, *args, _users=1, **kwargs):
        super().__init__(fns, _users)
        self.args = tuple(args)
        self.kwargs = dict(kwargs)
        # Nested containers need a pytree-based flatten; flat args/kwargs
        # can use the cheaper simple_flatten.
        if any(
            isinstance(x, (dict, list, tuple))
            for x in itertools.chain(args, kwargs.values())
        ):
            self.flatten = self.pytree_flatten
        else:
            self.flatten = self.simple_flatten
        # Pre-flattened (items, spec) of this pattern's own args/kwargs.
        self.flat_args_kwargs = self.flatten(self.args, self.kwargs)

    @staticmethod
    def simple_flatten(args, kwargs):
        # One-level flatten: values plus a spec of (num_positional, *kwarg_names).
        return (*args, *kwargs.values()), (len(args), *kwargs.keys())

    @staticmethod
    def pytree_flatten(args, kwargs):
        # Normalize container types (immutable_list/tuple -> list,
        # immutable_dict -> dict) so specs from a pattern and from a graph
        # node can compare equal.
        def norm_spec(s: pytree.TreeSpec):
            if s.type is None:
                return s
            mapping = {immutable_list: list, tuple: list, immutable_dict: dict}
            return pytree.TreeSpec(
                mapping.get(s.type, s.type),
                s.context,
                list(map(norm_spec, s.children_specs)),
            )

        flat, spec = pytree.tree_flatten([args, kwargs])
        spec = norm_spec(spec)
        return flat, spec

    def __repr__(self):
        args = [
            self.fns_repr(),
            *map(repr, self.args),
            *[f"{k}={v}" for k, v in self.kwargs.items()],
        ]
        return f"{self.__class__.__name__}({', '.join(args)})"

    def _match(self, node: torch.fx.Node, ctx: MatchContext):
        # Cheap rejections first: wrong op/target or wrong arity.
        if (
            not self._match_fns(node)
            or len(node.args) != len(self.args)
            or len(node.kwargs) != len(self.kwargs)
        ):
            return FailedMatch(f"function_mismatch: node={node}, pattern={self}")

        if not self._match_users(node, ctx):
            return FailedMatch(f"multiple_users {node}")

        # Compare argument structure, then recurse element-wise.
        node_items, node_spec = self.flatten(node.args, node.kwargs)
        self_items, self_spec = self.flat_args_kwargs
        if node_spec != self_spec:
            return FailedMatch(f"args_structure {node_spec} {self_spec}")
        assert len(node_items) == len(self_items)

        m = Match(self)
        for i, pattern, child_node in zip(itertools.count(), self_items, node_items):
            if isinstance(pattern, PatternExpr):
                child_match = ctx.match(pattern, child_node)
                if not child_match:
                    return child_match
                m.extend(child_match)
            elif isinstance(child_node, torch.fx.Node) or child_node != pattern:
                # A literal in the pattern must equal the value in the graph.
                return FailedMatch(f"constant_args: {node} {child_node!r}!={pattern!r}")
        m.nodes.append(node)
        m.targets[self] = node.target
        return m

    def find_anchor_nodes(self, ctx: MatchContext, searched):
        """
        This is used when we are matching a pattern with multiple outputs.
        There is a partial match (stored in ctx) and we want to walk
        this pattern to find a connection to an already-matched node.

        Yields candidate nodes that `self._match` might like.
        """
        if self in ctx.pattern_to_node:
            yield ctx.pattern_to_node[self]
            return

        # Recurse through child patterns and surface each anchor's users
        # whose op/target fit this pattern.
        for pattern in self.flat_args_kwargs[0]:
            if isinstance(pattern, PatternExpr):
                for other_node in pattern.find_anchor_nodes(ctx, searched):
                    if not isinstance(other_node, torch.fx.Node):
                        continue
                    for node in other_node.users:
                        if node not in searched:
                            if self._match_fns(node):
                                yield node
                                searched.add(node)
class CallFunction(_TargetArgsExpr):
    """
    Matches a call_function node in the FX graphs: `fns[i](*args, **kwargs)`
    """

    # node.op value compared against in _TargetExpr._match_fns
    op = "call_function"
class CallMethod(_TargetArgsExpr):
    """
    Matches a call_method node in the FX graphs: `fns[i].method(*args, **kwargs)`
    """

    # node.op value compared against in _TargetExpr._match_fns
    op = "call_method"
class _TargetExprVarArgs(_TargetExpr):
    """
    Like CallFunction/CallMethod but imposes no constraints on the call's
    arguments; whatever args/kwargs the node carries are captured into the
    resulting Match.
    """

    def _match(self, node: torch.fx.Node, ctx: MatchContext):
        if not self._match_fns(node):
            return FailedMatch("function_mismatch")

        if not self._match_users(node, ctx):
            return FailedMatch("multiple_users")

        result = Match(self)
        result.nodes.append(node)
        result.targets[self] = node.target
        result.args.extend(node.args)
        result.kwargs.update(node.kwargs)
        return result
class CallFunctionVarArgs(_TargetExprVarArgs):
    """Matches a call_function node regardless of its arguments."""

    op = "call_function"
class CallMethodVarArgs(_TargetExprVarArgs):
    """Matches a call_method node regardless of its arguments."""

    op = "call_method"
class ListOf(PatternExpr):
    """
    Matches a repeated pattern
    """

    def __init__(self, pattern, partial=False):
        # partial=True tolerates individual elements failing to match, as
        # long as at least one element matches.
        super().__init__()
        assert isinstance(pattern, PatternExpr)
        self.pattern = pattern
        self.partial = partial

    def __repr__(self):
        return f"{self.__class__.__name__}({self.pattern})"

    def _match(self, node: List[torch.fx.Node], ctx: MatchContext):
        if not isinstance(node, (list, tuple)) or len(node) == 0:
            return FailedMatch("non_list")
        m = Match(self)
        # Propagating patterns with multiple users will ensure we don't revisit
        # the same nodes
        pattern_to_node = ctx.filter_multi_user_patterns()
        matched = False
        for i, child_node in enumerate(node):
            # Each element gets its own context, seeded with the multi-user
            # matches carried over from the previous element.
            child_ctx = MatchContext(
                ctx.outputs, pattern_to_node, graph=child_node.graph
            )
            child_match = child_ctx.match(self.pattern, child_node)
            pattern_to_node = child_ctx.filter_multi_user_patterns()
            if not child_match:
                if not self.partial:
                    return FailedMatch(f"list[{i}]: {child_match}")
                continue
            matched = True
            m.extend(child_match.bundle())
        if not matched:
            return FailedMatch("list: no_match")
        return m.bundle()
class MultiOutputPattern(PatternExpr):
    """
    Matches a subgraph with several outputs; outputs[0] is the primary
    pattern, the remaining ones are connected to it via shared anchor nodes.
    """

    def __init__(self, outputs):
        super().__init__()
        # None entries are allowed and simply skipped during matching.
        assert all(isinstance(x, (PatternExpr, type(None))) for x in outputs), outputs
        self.outputs = outputs

    @property
    def fns(self):
        return self.outputs[0].fns

    def __repr__(self):
        return f"{self.__class__.__name__}({self.outputs})"

    def _match(self, node: torch.fx.Node, ctx: MatchContext):
        # Match the primary output directly, then attach each remaining
        # output pattern through an anchor node search.
        m = ctx.match(self.outputs[0], node)
        if not m:
            return m

        for pattern in self.outputs[1:]:
            if pattern is None:
                continue
            child_match = self._match_from_anchors(pattern, ctx)
            if not child_match:
                return child_match
            m.extend(child_match)

        return m

    def _match_from_anchors(self, pattern, ctx):
        # Try every candidate anchor; restore ctx state after each failure.
        prior = dict(ctx.pattern_to_node)
        m = FailedMatch("no anchor found")
        for node in pattern.find_anchor_nodes(ctx, set()):
            m = ctx.match(pattern, node)
            if m:
                return m
            # revert any partial matches
            ctx.pattern_to_node = dict(prior)
        return m

    def match(self, node: torch.fx.Node) -> Union[Match, FailedMatch]:
        try:
            return MatchContext(self.outputs, graph=node.graph).match(self, node)
        except FailedMatch as e:
            return e
class RepeatedExpr(PatternExpr):
    """
    Checks for a repeated pattern. Useful for repeated operations after a node such as `split` or `unbind`
    """

    def __init__(self, inner_pattern):
        super().__init__()
        assert isinstance(inner_pattern, PatternExpr)
        self.inner_pattern = inner_pattern

    @property
    def fns(self):
        return self.inner_pattern.fns

    def _match(self, node: torch.fx.Node, ctx: MatchContext):
        m = ctx.match(self.inner_pattern, node)
        if not m:
            return m
        # Drop the cached pattern->node entry so the inner pattern can
        # re-match at each anchor below.
        ctx.pattern_to_node.pop(
            self.inner_pattern,
        )
        # Check all anchor nodes match the pattern
        for anchor_node in self.inner_pattern.find_anchor_nodes(ctx, set()):
            anchor_m = MatchContext([self], graph=node.graph).match(
                self.inner_pattern, anchor_node
            )
            if not anchor_m:
                return anchor_m
            m.extend(anchor_m)
        return m
@dataclasses.dataclass
class PatternEntry:
    """A pattern plus an extra predicate, registered under each of the pattern's fns."""

    pattern: PatternExpr
    extra_check: Callable[[Match], bool]

    def apply(self, match: Match, graph: torch.fx.Graph, node: torch.fx.Node):
        # Subclasses decide what happens when the pattern matches.
        raise NotImplementedError()

    def register(self, pass_dicts, target=None, prepend=False):
        # pass_dicts may be a single dict/PatternMatcherPass or an iterable
        # of them; with no explicit target, register under every fn the
        # pattern can match.
        if target is None:
            for fn in self.pattern.fns:
                self.register(pass_dicts, fn, prepend=prepend)
        elif isinstance(pass_dicts, (dict, PatternMatcherPass)):
            if prepend:
                pass_dicts[target].insert(0, self)
            else:
                pass_dicts[target].append(self)
        else:
            for x in pass_dicts:
                self.register(x, target, prepend=prepend)
@dataclasses.dataclass
class LoweringPatternEntry(PatternEntry):
    """On match, replaces the matched subgraph with a single call to *handler*."""

    handler: Any

    def apply(self, match: Match, graph: torch.fx.Graph, node: torch.fx.Node):
        # Bind the match into the handler so lowering can see it later.
        handler = functools.wraps(self.handler)(functools.partial(self.handler, match))
        with graph.inserting_before(node):
            replacement = graph.call_function(handler, tuple(match.args), match.kwargs)
            replacement.meta.update(node.meta)
            node.replace_all_uses_with(replacement)
        # apply() is expected to run on the last matched node.
        assert match.nodes[-1] is node
        match.erase_nodes(graph)
@dataclasses.dataclass
class GraphPatternEntry(PatternEntry):
    """
    A pattern that runs a function on the FX graph
    """

    handler: Any

    def apply(self, match: Match, graph: torch.fx.Graph, node: torch.fx.Node):
        # The handler mutates the graph itself; any insertions it performs
        # land just before *node*.
        with graph.inserting_before(node):
            self.handler(match, *match.args, **match.kwargs)
@dataclasses.dataclass
class ReplacementPatternEntry(PatternEntry):
    """On match, splices a pre-traced replacement graph in place of the match."""

    # Converts the matched kwargs back into the positional order the traced
    # replacement graph expects.
    normalize_args: Callable

    @staticmethod
    def replace_with_graph(
        match: Match,
        graph: torch.fx.Graph,
        replacement_graph: torch.fx.Graph,
        args: List[Any],
    ):
        output_nodes = match.output_nodes()
        # NOTE(review): first_node is unused below.
        first_node = output_nodes[0]

        class Replacer(torch.fx.Interpreter):
            # Only placeholder/output/call_function are supported in a
            # replacement graph; the rest are disabled.
            call_method = None
            call_module = None
            get_attr = None

            def run_node(self, node) -> Any:
                if node.op in ("placeholder", "output"):
                    return super().run_node(node)
                if node.op == "call_function":
                    target = node.target
                    args, kwargs = self.fetch_args_kwargs_from_env(node)
                    # Re-emit the call into the target graph, carrying over
                    # fake-value metadata when the result lacks it.
                    result = graph.call_function(target, args, kwargs)
                    if "val" in node.meta and "val" not in result.meta:
                        result.meta["val"] = node.meta["val"]
                        if isinstance(node.meta["val"], torch.Tensor):
                            assert "tensor_meta" in node.meta
                            result.meta["tensor_meta"] = node.meta["tensor_meta"]
                    return result
                raise NotImplementedError(f"unhandled {node}")

        output_nodes = match.output_nodes()

        if len(output_nodes) == 1:
            last_node = output_nodes[0]
        else:
            # Insert before the earliest output node in graph order.
            nodes = list(output_nodes[0].graph.nodes)
            indices = [
                (nodes.index(n), n)
                for n in output_nodes
                if isinstance(n, torch.fx.Node)
            ]
            last_node = min(indices, key=lambda tup: tup[0])[1]

        def percolate_tags(node, recompute_tag):
            # Push a "recompute" tag from an output down to all its inputs.
            for arg in node.all_input_nodes:
                if hasattr(arg, "meta"):
                    arg.meta["recompute"] = recompute_tag
                    percolate_tags(arg, recompute_tag)

        with graph.inserting_before(last_node):
            replacement = Replacer(replacement_graph).run(*args)
            if isinstance(replacement, torch.fx.Node):
                replacement = [replacement]
            assert len(replacement) == len(output_nodes)
            for old, new in zip(output_nodes, replacement):
                if old is None:
                    assert new is None
                elif new is None:
                    old.replace_all_uses_with(None)
                else:
                    if "val" not in new.meta:
                        new.meta.update(old.meta)

                    # Preserve the recompute tags in the replacement graph. We
                    # look at the recompute tags of the original output node to
                    # propagate the tag from the output all the way to the input
                    # args in the replacement graph.
                    # Note that this is best effort. Since patterns are from
                    # many to many, there is no easy way to correctly map the
                    # recomputable tags. It is possible in some scenarios that we
                    # incorrectly tag some nodes as recomputables.
                    if "recompute" in old.meta:
                        percolate_tags(new, old.meta["recompute"])

                    old.replace_all_uses_with(new)

        match.erase_nodes(graph)

    def apply(self, match: Match, graph: torch.fx.Graph, node: torch.fx.Node):
        self.replace_with_graph(
            match,
            graph,
            match.replacement_graph,
            self.normalize_args(*match.args, **match.kwargs),
        )
def _return_true(match):
|
| 696 |
+
return True
|
| 697 |
+
|
| 698 |
+
|
| 699 |
+
def register_replacement(
    search_fn,
    replace_fn,
    example_inputs,
    trace_fn,
    pass_dict,
    extra_check=_return_true,
    scalar_workaround=(),
    exclusive_arg_names=(),
):
    """
    Create a replacement rule based on example functions that get traced
    to create patterns. This supports both training and inference when
    run on a joint forward+backward graph.

    Args:
        search_fn: traced to give original pattern
        replace_fn: traced to give replacement graph
        example_inputs: example inputs for initial trace
        trace_fn: inference_graph or training_graph
        pass_dict: dict of passes to register to
        extra_check: additional check to run on match (using real shapes)
    """

    def check_fn(match: Match):
        """
        Often shapes get burned into the pattern, so our initial match ran with
        `ignore_types=(int, ...)`.

        Recheck the match with the correct shapes.
        """
        args = list(
            torch.fx.map_arg(
                [match.kwargs[name] for name in argnames], lambda n: n.meta["val"]
            )
        )
        for i, grad in enumerate(requires_grad):
            if isinstance(args[i], torch.Tensor):
                # Reject matches where a grad-requiring slot holds an
                # integer tensor.
                if grad and is_integer_dtype(args[i].dtype):
                    return False

                # Rebuild the fake tensor with the requires_grad flag the
                # pattern was traced with.
                with torch._dynamo.utils.detect_fake_mode(args):
                    args[i] = torch.empty_strided(
                        args[i].size(),
                        args[i].stride(),
                        dtype=args[i].dtype,
                        device=args[i].device,
                        requires_grad=grad,
                    )
        # Re-trace the search function with the real shapes and require an
        # exact match of the resulting pattern.
        specific_graph = trace_fn(search_fn, args)
        specific_pattern = fx_to_pattern(
            specific_graph, argnames=argnames, exclusive_arg_names=exclusive_arg_names
        )
        specific_pattern_match = specific_pattern.match(match.output_nodes()[0])
        if specific_pattern_match and extra_check(specific_pattern_match):
            # trace the pattern using the shapes from the user program
            match.replacement_graph = trace_fn(replace_fn, args)
            return True
        return False

    def normalize_args(**kwargs):
        # Rebuild positional order: the named args first, then the
        # tangents_N inputs produced by the joint graph.
        args = []
        for name in argnames:
            args.append(kwargs.pop(name))
        for i in range(1, len(kwargs) + 1):
            args.append(kwargs.pop(f"tangents_{i}"))
        assert not kwargs, f"leftover kwargs: {kwargs!r}"
        return args

    # TODO: Revisit the functionalize_rng_ops for lowmem dropout
    with functorch_config.patch(functionalize_rng_ops=False):
        argnames = [*inspect.signature(search_fn).parameters.keys()]
        requires_grad = [
            isinstance(x, torch.Tensor) and x.requires_grad for x in example_inputs
        ]
        search_gm = trace_fn(search_fn, example_inputs)
        pattern = fx_to_pattern(
            search_gm,
            ignore_types=(int, float, list, torch.device, torch.dtype),
            argnames=argnames,
            scalar_workaround=scalar_workaround,
            exclusive_arg_names=exclusive_arg_names,
        )
        # Guard against registering the same pattern twice.
        assert repr(pattern) not in _seen_patterns
        _seen_patterns.add(repr(pattern))
        pattern = ReplacementPatternEntry(
            pattern=pattern,
            extra_check=check_fn,
            normalize_args=normalize_args,
        )
        pattern.register(pass_dict)
def register_lowering_pattern(
    pattern, extra_check=_return_true, *, pass_dict, prepend=False
):
    """
    Register an aten to inductor IR replacement pattern. The decorated
    function is saved and then called at lowering time allowing direct
    pattern to inductor IR conversion.
    """

    def decorator(handler):
        assert callable(handler)
        LoweringPatternEntry(
            pattern=pattern, extra_check=extra_check, handler=handler
        ).register(pass_dict, prepend=prepend)
        # Marker attribute; presumably checked by the lowering machinery —
        # TODO confirm where it is consumed.
        handler._inductor_lowering_function = True
        return handler

    return decorator
def register_graph_pattern(
    pattern, extra_check=_return_true, *, pass_dict, prepend=False
):
    """
    Register a pattern that runs a function on the FX graph, allowing
    custom transformation code.
    """

    def decorator(handler):
        assert callable(handler)
        entry = GraphPatternEntry(
            pattern=pattern, extra_check=extra_check, handler=handler
        )
        entry.register(pass_dict, prepend=prepend)
        return handler

    return decorator
def is_start_of_fx_graph(graph, node):
    """True when *node* is the very first node of *graph*."""
    first = next(iter(graph.nodes))
    return node is first
# match: copy_, relu_, _set_grad_enabled, manual_seed, enter_functional_autocast, etc
# Heuristic: a trailing underscore, or set/enter/exit/seed appearing as a
# word or underscore-delimited fragment, marks a mutating/stateful op name.
_mutation_op_re = re.compile(r"_$|(\b|_)(set|enter|exit|seed)(\b|_)")
def is_mutation_op(node):
    """Heuristically decide whether *node* mutates state (name regex or out= kwarg)."""
    if node.op == "call_function" and _mutation_op_re.search(node.target.__name__):
        return True
    if node.op == "call_method" and _mutation_op_re.search(node.target):
        return True
    return node.kwargs.get("out") is not None
def get_mutation_region_id(graph, node):
    """
    Return the mutation region id of *node*, lazily filling in cached ids
    (node.meta["mutation_region_id"]) for the nodes walked over.
    """
    # Walk backwards until we hit a node with a cached id or the graph start.
    n = node
    while "mutation_region_id" not in n.meta and not is_start_of_fx_graph(graph, n):
        n = n.prev
    mutation_region_id = n.meta.get("mutation_region_id", 0)
    # Walk forward again to *node*, bumping the id at every mutation op and
    # caching it on each intermediate node.
    while n is not node:
        n = n.next
        if is_mutation_op(n):
            mutation_region_id += 1
        n.meta["mutation_region_id"] = mutation_region_id
    return mutation_region_id
def should_compute_mutation_region_ids(graph):
    """True when mutation region ids have not been computed for *graph* yet."""
    first = next(iter(graph.nodes))
    return "mutation_region_id" not in first.meta
def compute_mutation_region_ids(graph):
    """Annotate every node with meta["mutation_region_id"], bumping at each mutation op."""
    region = 0
    for node in graph.nodes:
        if is_mutation_op(node):
            region += 1
        node.meta["mutation_region_id"] = region
class PatternMatcherPass:
    """A graph pass that applies registered PatternEntry rewrites, keyed by node target."""

    def __init__(self, prevent_match_across_mutations=False):
        super().__init__()
        # target -> list of PatternEntry, tried in registration order
        self.patterns = defaultdict(list)
        self.prevent_match_across_mutations = prevent_match_across_mutations

    def __getitem__(self, item):
        return self.patterns[item]

    def apply(self, graph):
        """Run all registered patterns over *graph*; return the number applied."""
        if not self.patterns:
            return 0
        if isinstance(graph, torch.fx.GraphModule):
            graph = graph.graph
        if self.prevent_match_across_mutations:
            if should_compute_mutation_region_ids(graph):
                compute_mutation_region_ids(graph)
            get_mutation_region_id_partial = functools.partial(
                get_mutation_region_id, graph
            )
        count = 0
        for node in reversed(graph.nodes):
            if (
                node.op in ["call_function", "call_method"]
                and node.target in self.patterns
            ):
                # conservatively not applying pattern for cpu input,
                # since some of the patterns induce codegen and split nodes.
                # Note: we will only skip cpu compute if disable_cpp_codegen=True
                if fallback_node_due_to_unsupported_type(node, allow_cpu_inputs=False):
                    continue

                for entry in self.patterns[node.target]:
                    # An earlier entry may already have erased this node.
                    if node._erased:
                        break
                    m = entry.pattern.match(node)
                    # pattern match crosses mutation barrier - discard
                    if (
                        self.prevent_match_across_mutations
                        and m
                        and len(set(map(get_mutation_region_id_partial, m.nodes))) != 1
                    ):
                        continue
                    if os.environ.get("TORCHINDUCTOR_PATTERN_MATCH_DEBUG") == node.name:
                        log.warning("%s%s %s %s", node, node.args, m, entry.pattern)
                    if m and entry.extra_check(m):
                        count += 1
                        entry.apply(m, graph, node)
                        counters["inductor"]["pattern_matcher_count"] += 1
                        counters["inductor"]["pattern_matcher_nodes"] += len(m.nodes)
        return count

    def clear(self):
        self.patterns.clear()
def _not_implemented(*args, **kwargs):
|
| 931 |
+
raise NotImplementedError()
|
| 932 |
+
|
| 933 |
+
|
| 934 |
+
def fx_to_pattern(
    gm, ignore_types=(), argnames=(), scalar_workaround=(), exclusive_arg_names=()
):
    """
    Convert an FX graph into a PatternExpr. This is useful for simple
    patterns that can only match single functions and fixed length lists.
    """
    # scalar_workaround is a hack to capture dropout_p
    # see https://github.com/pytorch/pytorch/issues/97894
    scalar_workaround = scalar_workaround or {}
    inv_scalar_workaround = {v: k for k, v in scalar_workaround.items()}
    assert len(inv_scalar_workaround) == len(scalar_workaround)

    def process_arg(x):
        # Scalars registered in scalar_workaround become named capture slots;
        # ignored types (and lists that are entirely Ignored) match anything.
        if isinstance(x, (float, int)) and x in inv_scalar_workaround:
            return KeywordArg(inv_scalar_workaround[x])
        if type(x) in ignore_types:
            return Ignored()
        if isinstance(x, list) and all(isinstance(y, Ignored) for y in x) and x:
            return Ignored()
        return x

    argnum = itertools.count()

    class Converter(torch.fx.Interpreter):
        # Only placeholders/call_function are convertible to patterns.
        call_method = _not_implemented
        call_module = _not_implemented
        get_attr = _not_implemented

        def placeholder(self, target, args, kwargs):
            # Placeholders become keyword capture slots, named either from
            # argnames or from the (de-mangled) placeholder target.
            n = next(argnum)
            if n < len(argnames):
                name = argnames[n]
            elif argnames:
                assert target.startswith("tangent")
                name = target
            else:
                target = re.sub(r"_\d+$", "", target)  # de-mangle arg name
                name = target
            if name in exclusive_arg_names:
                return ExclusiveKeywordArg(name)
            else:
                return KeywordArg(name)

        def call_function(self, target, args, kwargs):
            args, kwargs = pytree.tree_map(process_arg, (args, kwargs))
            if list in ignore_types:
                # Handle a burned in tensor size which are now [Ignored(), Ignored(), ...]
                args = [process_arg(a) for a in args]
                kwargs = {k: process_arg(a) for k, a in kwargs.items()}
            return CallFunction(target, *args, **kwargs)

        def run_node(self, n):
            rv = super().run_node(n)
            # Copy user counts from the traced graph onto the pattern so
            # matching can enforce them.
            if n.op == "output" and isinstance(rv, tuple):
                assert len(rv) == len(n.args[0])
                for r, arg in zip(rv, n.args[0]):
                    r.users = len(arg.users)
            else:
                rv.users = len(n.users)
            return rv

    pattern = Converter(gm).run()
    if not isinstance(pattern, PatternExpr):
        # Multiple outputs: wrap the flattened result tree.
        return MultiOutputPattern(pytree.tree_flatten(pattern)[0])
    return pattern
@torch.no_grad()
def inference_graph(fn, args):
    """Build a normalized inference graph, for use with fx_to_pattern"""
    # Trace with the inductor decomposition table so the traced pattern
    # matches post-decomposition graphs.
    gm = make_fx(fn, select_decomp_table())(*args)
    gm.graph.eliminate_dead_code()
    gm.recompile()
    return gm
@torch.enable_grad()
def training_graph(fn, args):
    """Build a normalized training graph, for use with fx_to_pattern"""
    gm = None

    def record_joint_graph(joint_graph, inputs, **kwargs):
        # Capture the joint forward+backward graph as a side effect of
        # aot_function; its actual compiled output is discarded.
        nonlocal gm
        assert not gm
        gm = clone_graph(joint_graph)
        return default_partition(joint_graph, inputs, **kwargs)

    with torch._guards.tracing(None):
        aot_function(
            fn,
            lambda g, i: make_boxed_func(g),
            partition_fn=record_joint_graph,
            decompositions=select_decomp_table(),
            enable_log=False,
        )(*args)

    from .fx_passes.joint_graph import pointless_view

    matcher_pass = PatternMatcherPass()

    # Normalize away no-op aten.view calls so traced patterns are canonical.
    pattern = CallFunction(
        torch.ops.aten.view.default, KeywordArg("arg"), KeywordArg("size")
    )
    GraphPatternEntry(
        pattern=pattern, handler=pointless_view, extra_check=_return_true
    ).register(matcher_pass.patterns)
    matcher_pass.apply(gm.graph)

    # remove in/out specs
    gm.graph._codegen = torch.fx.graph.CodeGen()
    gm.graph.eliminate_dead_code()
    gm.recompile()
    return gm
def _args(n: torch.fx.Node):
|
| 1051 |
+
args = list()
|
| 1052 |
+
torch.fx.map_arg((n.args, n.kwargs), args.append)
|
| 1053 |
+
return args
|
| 1054 |
+
|
| 1055 |
+
|
| 1056 |
+
def stable_topological_sort(graph: torch.fx.Graph):
    """Reorder graph.nodes topologically while keeping the existing order stable."""
    # Nodes blocked on an input, keyed by their first not-yet-ready input.
    waiting = defaultdict(list)
    ready = set()
    # Last node placed in its final position; blocked nodes are re-inserted
    # right after it once their inputs become ready.
    cursor = None

    def check(node):
        waiting_for = [x for x in _args(node) if x not in ready]
        if waiting_for:
            # revisit this node when next input is ready
            waiting[waiting_for[0]].append(node)
        else:
            nonlocal cursor
            cursor = node
            ready.add(node)
            for other in waiting.pop(node, ()):
                # Node.append re-inserts `other` directly after the cursor
                # in the graph's node order.
                cursor.append(other)
                check(other)

    for n in list(graph.nodes):
        check(n)
    # Everything must have been placed; otherwise the graph had a cycle or
    # an input outside the graph.
    assert not waiting and len(ready) == len(graph.nodes)
def init_once_fakemode(fn):
    """Wrapper around lazy init functions in fx_passes/"""

    @functools.lru_cache(None)
    @functools.wraps(fn)
    def lazy_init():
        # Snapshot counters so tracing performed during init does not leak
        # into user-visible pattern-match statistics.
        counters_ref = counters["inductor"].copy()

        # Run the init under a fresh FakeTensorMode with no active tracing.
        with torch._guards.tracing(
            None
        ), maybe_disable_fake_tensor_mode(), FakeTensorMode():
            result = fn()

        # clear view matches encountered during tracing
        counters["inductor"] = counters_ref

        return result

    return lazy_init
def config_flag(name):
    """Build an extra_check predicate gated on ``config.<name>``."""

    def _check(match):
        return getattr(config, name)

    return _check
def clone_graph(input_graph):
    """Deep-copy an FX GraphModule, preserving node meta and node names."""

    class CopyGraph(Transformer):
        def run_node(self, old_node):
            new_node = super().run_node(old_node)
            if isinstance(new_node, torch.fx.Proxy):
                new_node.node.meta.update(old_node.meta)
                # Keep the original node names stable across the copy.
                new_node.node.name = self.new_graph._graph_namespace.create_name(
                    old_node.name, None
                )
            return new_node

    return CopyGraph(input_graph).transform()
# repr() strings of every pattern registered via register_replacement;
# used there to assert against duplicate registrations.
_seen_patterns = set()
def get_arg_value(node, arg_number, kwarg_name=None):
    """Fetch positional arg *arg_number*, falling back to kwargs[kwarg_name]."""
    if len(node.args) > arg_number:
        return node.args[arg_number]
    return node.kwargs.get(kwarg_name)
def filter_nodes(nodes, fn):
    """Keep only the nodes whose target is *fn* or, for an OpOverloadPacket, one of its overloads."""
    targets = [fn]
    if isinstance(fn, torch._ops.OpOverloadPacket):
        targets.extend(getattr(fn, name) for name in fn.overloads())

    return [n for n in nodes if n.target in targets]
def same_layout(node1: torch.fx.Node, node2: torch.fx.Node):
    """True if two nodes have the same size/strides"""
    val1 = node1.meta.get("val")
    val2 = node2.meta.get("val")
    if val1 is None or val2 is None:
        return False
    return val1.size() == val2.size() and val1.stride() == val2.stride()
def remove_extra_clones(graph: torch.fx.Graph):
    """Remove aten.clone nodes whose output is interchangeable with their input."""
    # Nodes already visited; since we iterate in reverse, membership means
    # "appears later in the graph than the current node".
    seen = set()
    for node in reversed(graph.nodes):
        if node.target is aten.clone.default:
            src = node.args[0]
            if (
                isinstance(src, torch.fx.Node)
                and src.op == "call_function"
                and isinstance(src.target, torch._ops.OpOverload)
                and not src.target.is_view
                # no later node also uses src — presumably to avoid aliasing
                # hazards with subsequent uses; TODO confirm intent
                and not any(u in seen for u in src.users)
                and same_layout(src, node)
            ):
                node.replace_all_uses_with(src)
                graph.erase_node(node)
        seen.add(node)
llava_next/lib/python3.10/site-packages/torch/_inductor/quantized_lowerings.py
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
def register_quantized_ops():
    """Register inductor lowering behavior for torch.ops.quantized ops.

    Marks quantized.max_pool2d as needing realized (materialized) inputs and
    routes it through the generic fallback path instead of generating a kernel.
    """
    # imported lazily to avoid a circular import with the lowering module
    from . import lowering

    quantized = torch.ops.quantized

    lowering.add_needs_realized_inputs(
        [
            quantized.max_pool2d,
        ]
    )

    lowering.make_fallback(quantized.max_pool2d)
|
llava_next/lib/python3.10/site-packages/torch/_inductor/scheduler.py
ADDED
|
@@ -0,0 +1,1749 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import collections
|
| 2 |
+
import dataclasses
|
| 3 |
+
import functools
|
| 4 |
+
import itertools
|
| 5 |
+
import logging
|
| 6 |
+
import os
|
| 7 |
+
import pprint
|
| 8 |
+
import textwrap
|
| 9 |
+
from typing import Dict, List, Optional, Set
|
| 10 |
+
|
| 11 |
+
import sympy
|
| 12 |
+
|
| 13 |
+
import torch
|
| 14 |
+
from torch._dynamo.utils import dynamo_timed
|
| 15 |
+
|
| 16 |
+
from . import config, dependencies, ir, metrics
|
| 17 |
+
from .codegen.common import get_scheduling_for_device
|
| 18 |
+
from .dependencies import StarDep, WeakDep
|
| 19 |
+
from .ir import ComputedBuffer
|
| 20 |
+
from .sizevars import SimplifyIndexing
|
| 21 |
+
from .utils import (
|
| 22 |
+
cache_on_self,
|
| 23 |
+
cmp,
|
| 24 |
+
free_symbol_has,
|
| 25 |
+
get_device_tflops,
|
| 26 |
+
get_dtype_size,
|
| 27 |
+
get_gpu_dram_gbps,
|
| 28 |
+
has_triton,
|
| 29 |
+
sympy_product,
|
| 30 |
+
)
|
| 31 |
+
from .virtualized import V
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
log = logging.getLogger(__name__)
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
def pformat(obj):
    """Pretty-format obj for trace logs.

    Sets are sorted by str() first (pprint struggles with sets of sympy
    exprs). Multi-line output is returned indented on a fresh line so it
    nests cleanly under a "name = " prefix.
    """
    if isinstance(obj, set):
        # pformat has trouble with sets of sympy exprs
        obj = sorted(obj, key=str)
    text = pprint.pformat(obj, indent=4)
    if "\n" not in text:
        return text
    return f"\n{textwrap.indent(text, ' ' * 4)}"
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
class OutputNode:
    """Sentinel scheduler user standing in for a graph output.

    Holds exactly one unmet dependency (the buffer it consumes) and quacks
    enough like a scheduler node for dependency bookkeeping.
    """

    def __init__(self, dep):
        self.unmet_dependencies = {dep}
        # graph outputs never feed other nodes
        self.inverse_users = []

    def is_reduction(self):
        # an output marker computes nothing
        return False

    def get_alias_names(self):
        return ()

    def get_name(self):
        return "OUTPUT"

    __repr__ = get_name
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
def fuse(node1: "BaseSchedulerNode", node2: "BaseSchedulerNode"):
    """Fuse two scheduler nodes, dispatching to the foreach-aware path when
    either participant is a foreach kernel."""
    use_foreach = node1.is_foreach() or node2.is_foreach()
    fusion_cls = ForeachKernelSchedulerNode if use_foreach else FusedSchedulerNode
    return fusion_cls.fuse(node1, node2)
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
# TODO(xmfan): reuse an existing mapping for this if it exists, or formalize this into ir.py:ExternKernel
# Maps wrapper-codegen kernel name strings back to the aten op they invoke, so
# extern kernels can be dry-run under FlopCounterMode in get_estimated_runtime().
kernel_name_to_op = {
    "extern_kernels.convolution": torch.ops.aten.convolution,
    "extern_kernels.mm": torch.ops.aten.mm,
    "extern_kernels.bmm": torch.ops.aten.bmm,
    "extern_kernels.addmm": torch.ops.aten.addmm,
}
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
class BaseSchedulerNode:
    """Common base for all scheduler nodes: wraps an ir.Buffer and tracks its
    read/write dependencies, users, ordering bounds, and buffer lifetime."""

    def __init__(self, scheduler: "Scheduler", node: ir.Buffer):
        self.scheduler: Scheduler = scheduler
        self.node: ir.Buffer = node
        self.users: Optional[List[NodeUser]] = None
        self.inverse_users: List[BaseSchedulerNode] = []
        self.set_read_writes(node.get_read_writes())
        self.recursive_predecessors: Optional[Set[str]] = None
        self.min_order: Optional[int] = None
        self.max_order: Optional[int] = None
        self.last_usage: Set[str] = None  # buffers that won't be used after this kernel
        self.written = False

    def __repr__(self):
        return f"{type(self).__name__}(name={self.get_name()!r})"

    def debug_str(self) -> str:
        """Longer form printout for trace logs"""
        name = self.get_name()
        lines = [
            f"{name}: {type(self).__name__}({type(self.node).__name__})",
            f"{name}.writes = {pformat(self.read_writes.writes)}",
            f"{name}.unmet_dependencies = {pformat(self.unmet_dependencies)}",
            f"{name}.met_dependencies = {pformat(self.read_writes.reads - self.unmet_dependencies)}",
            f"{name}.users = {self.users}",
        ]
        try:
            lines += [
                self.debug_str_extra(),
            ]
        except Exception:
            # debug output must never break compilation
            log.warning("Ignoring error in debug_str()", exc_info=True)

        return "\n".join(lines).rstrip()

    def debug_str_extra(self) -> str:
        # subclasses append their own trace-log details here
        return ""

    def log_details(self):
        log.info(
            "%s: unmet_dependencies = %s, writes = %s",
            self,
            self.unmet_dependencies,
            self.read_writes.writes,
        )

    def update_mutated_names(self, renames: Dict[str, str]):
        self.set_read_writes(self.read_writes.rename(renames))

    def add_mutation_dep(self, dep):
        self.set_read_writes(self.read_writes.with_read(dep))

    def set_users(self, users: List["NodeUser"]):
        """Record this node's users, merging duplicate entries per user node."""
        # deduplicate
        result: Dict[int, NodeUser] = {}
        for use in users:
            if id(use.node) in result:
                result[id(use.node)] = use.merge(result[id(use.node)])
            else:
                result[id(use.node)] = use
        self.users = list(result.values())

    def set_last_usage(
        self, future_used_buffers: Set[str], mutation_real_name: Dict[str, str]
    ):
        used_buffers = self.used_or_aliased_buffer_names()
        used_buffers = {mutation_real_name.get(k, k) for k in used_buffers}
        self.last_usage = used_buffers - future_used_buffers

    def get_aliases(self):
        return self.node.get_alias_names()

    def get_mutations(self):
        return self.node.get_mutation_names()

    def has_aliasing_or_mutation(self):
        return bool(self.get_aliases() or self.get_mutations())

    def set_read_writes(self, rw: dependencies.ReadWrites):
        self.read_writes: dependencies.ReadWrites = rw
        self.unmet_dependencies = self.read_writes.reads
        self.prune_deps()

    def op_counts(self):
        return self.read_writes.op_counts

    def used_buffer_names(self) -> Set[str]:
        return {
            dep.name
            for dep in itertools.chain(self.read_writes.reads, self.read_writes.writes)
        }

    def used_or_aliased_buffer_names(self) -> Set[str]:
        used_names = set()
        for dep in itertools.chain(self.read_writes.reads, self.read_writes.writes):
            used_names.add(dep.name)
            if V.graph.name_to_buffer.get(dep.name):
                layout = V.graph.name_to_buffer[dep.name].get_layout()
                # needed to avoid deallocating aliased buffer
                # if there are still uses of aliases ahead
                if isinstance(layout, ir.AliasedLayout):
                    used_names.add(layout.view.data.get_name())
        return used_names

    def prune_deps(self):
        # drop dependencies already satisfied by available buffers
        self.unmet_dependencies = {
            dep
            for dep in self.unmet_dependencies
            if dep.name not in self.scheduler.available_buffer_names
        }

    def prune_weak_deps(self):
        # Prune weak dependencies on buffers that have been removed
        def should_prune(dep):
            return isinstance(dep, WeakDep) and dep.name in V.graph.removed_buffers

        to_remove = {dep for dep in self.read_writes.reads if should_prune(dep)}
        self.set_read_writes(self.read_writes.remove_reads(to_remove))

    def prune_redundant_deps(self, name_to_fused_node):
        """
        Prunes stardeps intended for mutation ordering
        on an upstream fused node if after fusion there is another dependency
        on the fused upstream node, making the stardep redundant

        In essence this enforces an ordering on fusions. As fusions occur, prunable stardeps will
        be incrementally removed, enabling other fusions, ensuring they are fused in order.
        """
        name_to_dep_count = collections.Counter()

        for dep in self.unmet_dependencies:
            if not isinstance(dep, WeakDep):
                name_to_dep_count[name_to_fused_node[dep.name].get_name()] += 1

        def should_prune(dep):
            if isinstance(dep, WeakDep):
                is_redundant = (
                    name_to_dep_count[name_to_fused_node[dep.name].get_name()] > 0
                )
                # These can occur because fused nodes always gather deps from their snodes
                # If B has a weakdep on A
                # B gets fused with C, then any time BC is fused, the weakdep will reappear
                is_self_dep = name_to_fused_node[dep.name] == self
                return is_redundant or is_self_dep
            else:
                return False

        deps_to_prune = {dep for dep in self.unmet_dependencies if should_prune(dep)}
        self.unmet_dependencies = self.unmet_dependencies - deps_to_prune
        self.set_read_writes(self.read_writes.remove_reads(deps_to_prune))

    def get_name(self) -> str:
        return self.node.get_name()

    def get_first_name(self) -> str:
        return self.get_name()

    def get_names(self) -> Set[str]:
        return {self.get_name()}

    def get_nodes(self) -> List["BaseSchedulerNode"]:
        return [self]

    def get_device(self):
        return self.node.get_device()

    def is_reduction(self):
        return False

    def is_template(self):
        return False

    def is_extern(self):
        return False

    def is_foreach(self):
        return False

    def can_inplace(self, read_dep: dependencies.MemoryDep):
        # conservative default; subclasses opt in
        return False

    def has_side_effects(self):
        return False

    def allocate(self):
        """Emit allocation for this node's buffer, reusing an input buffer
        in-place when the reuse conditions below all hold."""
        if not self.node.should_allocate():
            return

        if isinstance(self, (SchedulerNode,)) and (
            self.node.get_alias_names() or self.node.get_mutation_names()
        ):
            V.graph.wrapper_code.codegen_allocation(self.node)
            return

        if (
            (
                isinstance(self, (SchedulerNode,))
                # o what have i done. lets make this an api
                or (
                    isinstance(self, ExternKernelSchedulerNode)
                    and isinstance(self.node, (ir.AllReduce, ir.InPlaceHint))
                )
            )
            and config.inplace_buffers
            and (
                not isinstance(V.kernel, torch._inductor.codegen.triton.TritonKernel)
                or getattr(V.kernel, "mutations", None) is not None
            )
        ):
            # imported lazily; wrapper imports from this module at top level
            from .codegen.wrapper import buffer_reuse_key

            ordered_reads = sorted(self.read_writes.reads, key=lambda x: x.name)

            for read in ordered_reads:
                input_node: BaseSchedulerNode = self.scheduler.name_to_node.get(
                    read.name
                )
                if input_node and V.graph.wrapper_code.can_reuse(input_node, self):
                    remaining_uses = [
                        x
                        for x in input_node.users
                        if x.node.get_name()
                        not in self.scheduler.available_buffer_names
                    ]
                    if (
                        len(remaining_uses) == 1
                        and remaining_uses[0].can_inplace
                        and remaining_uses[0].node is self
                        and not isinstance(
                            input_node.node.get_layout(),
                            (
                                ir.MultiOutputLayout,
                                ir.MutationLayout,
                                ir.AliasedLayout,
                            ),
                        )
                        and buffer_reuse_key(input_node.node)
                        == buffer_reuse_key(self.node)
                    ):
                        V.graph.wrapper_code.codegen_inplace_reuse(
                            input_node.node, self.node
                        )
                        # hacky check for if V.kernel is a real kernel or NullHandler
                        if hasattr(V.kernel, "args"):
                            # if there isn't a triton kernel, then we don't need to call triton-specific things.
                            # but TODO this might be a convenient place to signal to the Collective kernels to inplace
                            # (and, can we make "kernel" less generic of a name?)
                            V.kernel.args.make_inplace(
                                input_node.get_name(), self.get_name()
                            )
                            # mutations not tracked in cpp kernels
                            if isinstance(
                                V.kernel, torch._inductor.codegen.triton.TritonKernel
                            ):
                                V.kernel.mutations.add(input_node.get_name())
                                V.kernel.mutations.add(self.get_name())

                        # update last usage of reused node
                        self.last_usage.discard(input_node.get_name())

                        return
        V.graph.wrapper_code.codegen_allocation(self.node)

    def can_free(self):
        """A buffer can be freed unless a graph output still depends on it."""
        for use in self.users:
            if isinstance(use.node, OutputNode):
                return False
        return True

    def codegen_originating_info(self, buffer, only_once=True):
        """Emit '#pragma CMT' origin comments (op, seq_nr, stack trace) for
        this node's FX origins into the given codegen buffer."""
        if not config.comment_origin:
            return

        if only_once and self.written:
            return
        origins = self.node.origins
        out_lines = []

        for o in origins:
            if o.op == "output":
                # These are boring and samey
                continue

            out_lines.append("")
            # TODO(voz): Should the pragma be constant somewhere?
            out_lines.append("#pragma CMT ORIGIN:")
            op_info_str = f"#pragma CMT {o.op} {o.target}"
            if "seq_nr" in o.meta:
                op_info_str = op_info_str + f" seq_nr:{o.meta['seq_nr']}"
            out_lines.append(op_info_str)
            if "stack_trace" in o.meta:
                stack_trace = f"{o.meta['stack_trace']}"
                stack_trace_last_line = stack_trace.split("|")[-1]
                out_lines.append(
                    "#pragma CMT "
                    + stack_trace_last_line.replace("{", "{{")
                    .replace("}", "}}")
                    .replace("\n", "\\")
                )
            out_lines.append("#pragma CMT END ORIGIN")
            out_lines.append("")

        if len(out_lines) == 0:
            return

        # TODO(voz): Ostensibly, we should not need this. But there are cases where C++ codegen does
        # not use BracesBuffer, so we have no good indicator of a C++ buffer atm.
        buffer.writelines(out_lines)
        self.written = True

    def get_read_write_buffers_sizes(self) -> int:
        """Total bytes read + written by this node, skipping buffers fully
        internal to a fused node (never materialized)."""
        if isinstance(self, NopKernelSchedulerNode):
            return 0
        reads = {dep.name for dep in self.read_writes.reads}
        writes = {dep.name for dep in self.read_writes.writes}

        def is_materialized(buf):
            buf_uses = {user.node for user in self.scheduler.name_to_node[buf].users}
            return len(buf_uses - set(self.snodes)) > 0

        if isinstance(self, FusedSchedulerNode):
            removed_buffers = {dep for dep in writes if not is_materialized(dep)}
            writes = writes - removed_buffers
            reads = reads - removed_buffers
        node_bytes = 0
        for buf in reads | writes:
            if buf in V.graph.name_to_buffer:
                buf = V.graph.name_to_buffer[buf]
            elif buf in V.graph.graph_inputs:
                buf = V.graph.graph_inputs[buf]
            else:
                continue

            node_bytes += V.graph.sizevars.size_hint(
                sympy_product(buf.get_size())
            ) * get_dtype_size(buf.get_dtype())
        return node_bytes

    def get_estimated_runtime(self) -> float:
        """Rough runtime estimate in seconds: FLOP-counted for known extern
        kernels, memory-bandwidth-bound for fused/pointwise nodes; 0 when
        unknown or not on CUDA (0 means "no reordering based on runtime")."""
        layout = None
        dtype = None
        if not self.node:
            assert self.snodes
            layout = self.snodes[0].node.get_layout()
            dtype = self.snodes[0].node.get_dtype()
        else:
            layout = self.node.get_layout()
            dtype = self.node.get_dtype()

        if "cuda" != layout.device.type:
            # default to no reordering based on runtime
            return 0

        try:
            gpu_memory_bandwidth = get_gpu_dram_gbps()
            gpu_flops = get_device_tflops(dtype) * 10**12
        except Exception:
            return 0

        if isinstance(self, ExternKernelSchedulerNode):
            op = kernel_name_to_op.get(getattr(self.node, "kernel", ""), None)

            # if there is a resolved op, dry-run using fake mode and record flop count
            if op is not None:
                from torch._subclasses.fake_tensor import FakeTensorMode
                from torch.utils.flop_counter import FlopCounterMode

                with FakeTensorMode(), FlopCounterMode(
                    display=False
                ) as flop_counter_mode:
                    from .ir import ir_node_to_tensor

                    fake_inputs = [
                        ir_node_to_tensor(input) for input in self.node.inputs
                    ]
                    cls = self.node.__class__
                    cls.process_kernel(op, *fake_inputs, **self.node.kwargs)

                # TODO(xmfan): find a better heuristic to model FLOPS/latency relationship
                factor = 0.5
                counted_flops = flop_counter_mode.get_total_flops()
                return factor * counted_flops / gpu_flops

        elif isinstance(self, FusedSchedulerNode) or isinstance(
            self.node, ComputedBuffer
        ):
            return self.get_read_write_buffers_sizes() / gpu_memory_bandwidth

        # TODO(xmfan): add support for CollectiveKernel

        return 0
|
| 471 |
+
|
| 472 |
+
|
| 473 |
+
class ExternKernelSchedulerNode(BaseSchedulerNode):
    """Scheduler node wrapping an opaque extern kernel call."""

    def debug_str_extra(self) -> str:
        return f"{self.get_name()}.node.kernel = {getattr(self.node, 'kernel', None)}"

    def is_extern(self):
        return True

    def has_side_effects(self):
        return hasattr(self.node, "has_side_effects") and self.node.has_side_effects()

    def can_inplace(self, read_dep: dependencies.MemoryDep):
        """An extern kernel may reuse an input buffer in place only for
        AllReduce / InPlaceHint nodes whose single write matches the read's
        byte size."""
        if self.get_aliases() or self.is_template():
            return False

        # don't allow reuse of an 'input' buffer, we don't own it
        # (would this have been fixed if I tracked mutations properly above?)
        if read_dep.name not in self.scheduler.name_to_node:
            return False

        # TODO make this a property of the IR
        if not isinstance(
            self.node, (torch._inductor.ir.AllReduce, torch._inductor.ir.InPlaceHint)
        ):
            return False

        if len(self.read_writes.writes) != 1:
            return False

        only_write = next(iter(self.read_writes.writes))
        return read_dep.numbytes_hint() == only_write.numbytes_hint()
|
| 503 |
+
|
| 504 |
+
|
| 505 |
+
class NopKernelSchedulerNode(BaseSchedulerNode):
    """A scheduler node that emits no kernel; get_read_write_buffers_sizes()
    special-cases this class to 0 bytes."""

    pass
|
| 507 |
+
|
| 508 |
+
|
| 509 |
+
class SchedulerNode(BaseSchedulerNode):
|
| 510 |
+
    def __init__(self, scheduler: "Scheduler", node: ir.ComputedBuffer, group_fn):
        """Wrap a ComputedBuffer: simplify/reorder its loops, compute the
        (device, group) key, and extract normalized read/write dependencies."""
        super().__init__(scheduler, node)
        (
            self._sizes,
            self._body,
        ) = node.simplify_and_reorder()

        # presumably the fusion-group key (nodes with equal groups can fuse) —
        # confirm against Scheduler usage outside this chunk
        self.group = (node.get_device(), group_fn(self._sizes))

        if self.is_template():
            self.set_read_writes(node.normalized_read_writes())
        else:
            self.set_read_writes(
                dependencies.extract_read_writes(
                    self._body, *self._sizes, normalize=True
                )
            )
|
| 527 |
+
|
| 528 |
+
    def debug_str_extra(self) -> str:
        """SchedulerNode specifics (group, sizes, aliases, mutations, loop
        body) appended to the base debug_str() trace output."""
        name = self.get_name()
        lines = [
            f"{name}.group.device = {self.group[0]}",
            f"{name}.group.iteration = {self.group[1]}",
            f"{name}.sizes = {self._sizes}",
        ]
        if self.get_aliases():
            lines.append(f"{name}.aliases = {pformat(self.get_aliases())}")
        if self.get_mutations():
            lines.append(f"{name}.mutations = {pformat(self.get_mutations())}")
        if isinstance(self._body, ir.LoopBody):
            lines.append(f"class {name}_loop_body:")
            lines.append(textwrap.indent(self._body.debug_str(), " "))
        return "\n".join(lines)
|
| 543 |
+
|
| 544 |
+
def get_ranges(self):
|
| 545 |
+
return self._sizes
|
| 546 |
+
|
| 547 |
+
def is_reduction(self):
|
| 548 |
+
return bool(self.node.get_reduction_type())
|
| 549 |
+
|
| 550 |
+
def is_template(self):
|
| 551 |
+
return isinstance(self.node, ir.TemplateBuffer)
|
| 552 |
+
|
| 553 |
+
def run(self, *index_vars):
|
| 554 |
+
self.mark_run()
|
| 555 |
+
self.codegen(index_vars)
|
| 556 |
+
|
| 557 |
+
def mark_run(self):
|
| 558 |
+
self.allocate()
|
| 559 |
+
|
| 560 |
+
def ranges_from_index_vars(self, index_vars):
|
| 561 |
+
sizes = self._sizes
|
| 562 |
+
assert sum(map(len, sizes)) == sum(map(len, index_vars))
|
| 563 |
+
var_ranges = dict(
|
| 564 |
+
zip(
|
| 565 |
+
itertools.chain.from_iterable(index_vars),
|
| 566 |
+
itertools.chain.from_iterable(sizes),
|
| 567 |
+
)
|
| 568 |
+
)
|
| 569 |
+
return var_ranges
|
| 570 |
+
|
| 571 |
+
def codegen(self, index_vars):
|
| 572 |
+
var_ranges = self.ranges_from_index_vars(index_vars)
|
| 573 |
+
try:
|
| 574 |
+
with V.set_ops_handler(
|
| 575 |
+
SimplifyIndexing(V.get_ops_handler(), var_ranges)
|
| 576 |
+
), V.kernel.set_current_node(self):
|
| 577 |
+
self._body(*index_vars)
|
| 578 |
+
except Exception:
|
| 579 |
+
log.fatal("Error in codegen for %s", self.node)
|
| 580 |
+
raise
|
| 581 |
+
|
| 582 |
+
def pointwise_read_writes(self):
|
| 583 |
+
"""
|
| 584 |
+
Get the memory dependencies in the non-reduction axis.
|
| 585 |
+
"""
|
| 586 |
+
sizes, reduction_sizes = self._sizes
|
| 587 |
+
|
| 588 |
+
def fn(index):
|
| 589 |
+
return self._body(index, [sympy.Integer(0) for _ in reduction_sizes])
|
| 590 |
+
|
| 591 |
+
return dependencies.extract_read_writes(fn, sizes)
|
| 592 |
+
|
| 593 |
+
def can_inplace(self, read_dep: dependencies.MemoryDep):
|
| 594 |
+
if self.get_aliases() or self.is_template():
|
| 595 |
+
return False
|
| 596 |
+
if len(self.read_writes.writes) == 1 and isinstance(
|
| 597 |
+
read_dep, dependencies.MemoryDep
|
| 598 |
+
):
|
| 599 |
+
write_dep = next(iter(self.read_writes.writes))
|
| 600 |
+
return read_dep.index == write_dep.index and read_dep.size == write_dep.size
|
| 601 |
+
return False
|
| 602 |
+
|
| 603 |
+
|
| 604 |
+
class FusedSchedulerNode(BaseSchedulerNode):
    """
    This is a "fake" scheduler node that represents a group of scheduler nodes
    that are meant to be fused together. The way it does this is by maintaining
    its unmet dependencies as the union of its constituent nodes.
    """

    @classmethod
    def fuse(cls, node1: BaseSchedulerNode, node2: BaseSchedulerNode):
        """Alternate constructor: combine two nodes (either may itself be fused)."""
        assert node1.scheduler is node2.scheduler
        return cls(node1.scheduler, node1.get_nodes() + node2.get_nodes())

    def __init__(self, scheduler: "Scheduler", snodes: List[SchedulerNode]):
        # NB: No need to call super().__init__() because we don't need to re-use any of its logic.
        self.snodes = snodes
        self.scheduler = scheduler
        self.node = None  # type: ignore[assignment]
        self.users = None
        self.inverse_users = []
        # Prefer a reduction member's group so the fused kernel keeps the
        # reduction iteration structure.
        self.group = max(snodes, key=lambda x: int(x.is_reduction())).group
        self.recursive_predecessors = set.union(
            *[x.recursive_predecessors for x in snodes]
        )

        # NOTE: read_writes must be set before unmet_dependencies below,
        # which subtracts self.read_writes.writes.
        self.set_read_writes(
            dependencies.ReadWrites.merge_list([x.read_writes for x in snodes])
        )

        # A dependency is unmet only if it is produced neither by a member of
        # this fused node nor by one of our own writes.
        self.unmet_dependencies = {
            dep
            for dep in set.union(*[x.unmet_dependencies for x in snodes])
            if dep.name not in self.get_names()
        } - self.read_writes.writes
        self.min_order = min([x.min_order for x in self.snodes])
        self.max_order = max([x.max_order for x in self.snodes])

    @cache_on_self
    def get_name(self) -> str:
        # Fused name is the concatenation of member names, e.g. "buf0_buf1".
        return "_".join([x.get_name() for x in self.snodes])

    def get_first_name(self) -> str:
        return self.snodes[0].get_name()

    @cache_on_self
    def get_names(self) -> Set[str]:
        """All buffer names produced by any member node."""
        return set.union(*[x.get_names() for x in self.snodes])

    def debug_str_extra(self) -> str:
        """Indented debug dump of every member node."""
        lines = [
            f"{self.get_name()}.snodes[{i}] =\n{node.debug_str()}"
            for i, node in enumerate(self.snodes)
        ]
        return textwrap.indent("\n".join(lines).rstrip(), " ")

    def set_last_usage(
        self, future_used_buffers: Set[str], mutation_real_name: Dict[str, str]
    ):
        # Set self.last_usage using the global information
        # This will be used for inter-kernel optimisations
        super().set_last_usage(future_used_buffers, mutation_real_name)
        # Set self.last_usage on the snodes
        # This will be used for optimisations within the kernel
        future_used_buffers = set()
        for node in reversed(self.snodes):
            node.set_last_usage(future_used_buffers, mutation_real_name)
            future_used_buffers.update(node.last_usage)

    @cache_on_self
    def used_buffer_names(self) -> Set[str]:
        return set.union(*[x.used_buffer_names() for x in self.snodes])

    @cache_on_self
    def used_or_aliased_buffer_names(self) -> Set[str]:
        return set.union(*[x.used_or_aliased_buffer_names() for x in self.snodes])

    def get_nodes(self) -> List[BaseSchedulerNode]:
        return self.snodes

    def __repr__(self):
        return f"{type(self).__name__}(nodes={self.get_name()})"

    @cache_on_self
    def is_reduction(self):
        # A fused node is a reduction if any member is.
        return any(x.is_reduction() for x in self.snodes)

    @cache_on_self
    def is_template(self):
        return any(x.is_template() for x in self.snodes)

    def is_foreach(self):
        return False

    def get_device(self):
        # Device was captured as the first element of `group` in __init__.
        return self.group[0]

    @cache_on_self
    def has_aliasing_or_mutation(self):
        return any(x.has_aliasing_or_mutation() for x in self.snodes)

    @cache_on_self
    def op_counts(self):
        """Aggregate op-name counts across all member nodes."""
        op_counts = collections.Counter()
        for node in self.snodes:
            op_counts.update(node.op_counts())
        return op_counts

    # None of these need to be implemented, as a FusedSchedulerNode is just an
    # abstraction for scheduling purposes
    def update_mutated_names(self, renames: Dict[str, str]):
        raise NotImplementedError

    def add_mutation_dep(self, name):
        raise NotImplementedError

    def set_users(self, users: List["NodeUser"]):
        raise NotImplementedError

    def get_aliases(self):
        raise NotImplementedError

    def get_mutations(self):
        raise NotImplementedError

    def can_inplace(self, read_dep: dependencies.MemoryDep):
        raise NotImplementedError

    def allocate(self):
        raise NotImplementedError

    def can_free(self):
        raise NotImplementedError
|
| 735 |
+
|
| 736 |
+
|
| 737 |
+
class ForeachKernelSchedulerNode(FusedSchedulerNode):
    """Scheduler node which consists of a list of scheduler nodes that each operate on a
    distinct tensor in a list of tensors."""

    def get_consumer_subnode_for(self, producer):
        """Return the member subnode that reads `producer`'s output, or None."""
        if producer.get_name() in self.read_to_node:
            return self.read_to_node[producer.get_name()]

        return None

    def get_producer_subnode_for(self, consumer):
        """Return the member subnode whose output `consumer` reads, or None."""
        for rd in consumer.read_writes.reads:
            if rd.name in self.name_to_node:
                return self.name_to_node[rd.name]

        return None

    @classmethod
    def can_fuse(cls, producer, consumer):
        """Decide fusibility when at least one side is a foreach node.

        foreach+foreach: fusible only if element-wise pairs all fuse.
        foreach+scalar: fusible if the matching subnode fuses with the
        scalar node.

        Raises:
            AssertionError: if neither argument is a foreach node.
        """
        if producer.is_foreach() and consumer.is_foreach():
            return len(producer.snodes) == len(consumer.snodes) and all(
                producer.scheduler.can_fuse(l, r)
                for l, r in zip(producer.snodes, consumer.snodes)
            )
        elif consumer.is_foreach():
            consumer_subnode = consumer.get_consumer_subnode_for(producer)
            if consumer_subnode is not None:
                return consumer.scheduler.can_fuse(producer, consumer_subnode)

            return False

        elif producer.is_foreach():
            producer_subnode = producer.get_producer_subnode_for(consumer)
            if producer_subnode is not None:
                return producer.scheduler.can_fuse(producer_subnode, consumer)

            return False

        raise AssertionError(
            "At least one node passed to ForeachKernelSchedulerNode.can_fuse should be a foreach node"
        )

    @classmethod
    def fuse(cls, producer, consumer):
        """Fuse `producer` into `consumer` (or vice versa).

        For foreach+foreach, members are fused pairwise.  For
        foreach+scalar, only the matching subnode is replaced by a
        vertically fused node; the pre-fusion nodes are forwarded so
        __init__ can take its incremental-update path.
        """
        assert producer.is_foreach() or consumer.is_foreach()
        prev_node_1 = None
        prev_node_2 = None
        if producer.is_foreach() and consumer.is_foreach():
            fused_nodes = [
                FusedSchedulerNode.fuse(l, r)
                for l, r in zip(producer.snodes, consumer.snodes)
            ]
        elif producer.is_foreach():
            producer_subnode = producer.get_producer_subnode_for(consumer)
            fused_nodes = []
            prev_node_1 = producer
            prev_node_2 = None
            for node in producer.snodes:
                if node is producer_subnode:
                    new_node = FusedSchedulerNode.fuse(node, consumer)
                    prev_node_2 = new_node
                    fused_nodes.append(new_node)
                else:
                    fused_nodes.append(node)

        elif consumer.is_foreach():
            consumer_subnode = consumer.get_consumer_subnode_for(producer)
            fused_nodes = []
            prev_node_1 = consumer
            prev_node_2 = None

            for node in consumer.snodes:
                if node is consumer_subnode:
                    new_node = FusedSchedulerNode.fuse(producer, node)
                    prev_node_2 = new_node
                    fused_nodes.append(new_node)
                else:
                    fused_nodes.append(node)

        return cls(producer.scheduler, fused_nodes, prev_node_1, prev_node_2)

    def __init__(
        self,
        scheduler: "Scheduler",
        nodes: List[SchedulerNode],
        prev_node_1=None,
        prev_node_2=None,
    ):
        self.read_to_node = {}
        self.name_to_node = {}

        if prev_node_1 is None or prev_node_2 is None:
            # Fresh construction: let FusedSchedulerNode compute all
            # dependency state, then build our lookup tables.
            super().__init__(scheduler, nodes)

            for node in nodes:
                for read in node.read_writes.reads:
                    self.read_to_node[read.name] = node

                for name in node.get_names():
                    self.name_to_node[name] = node
        else:
            # Incremental re-fusion path: only one subnode changed, so update
            # state from the two pre-fusion nodes instead of recomputing.
            self.scheduler = scheduler
            self.snodes = nodes

            self.set_read_writes(
                dependencies.ReadWrites.merge_list(
                    [prev_node_1.read_writes, prev_node_2.read_writes]
                )
            )

            self.unmet_dependencies = {
                dep
                for dep in set.union(
                    prev_node_1.unmet_dependencies, prev_node_2.unmet_dependencies
                )
                if dep.name not in self.get_names()
            } - self.read_writes.writes

            self.min_order = min([prev_node_1.min_order, prev_node_2.min_order])
            self.max_order = max([prev_node_1.max_order, prev_node_2.max_order])

            # Exactly one of the two is the foreach node; reuse its tables.
            foreach_node = prev_node_1 if prev_node_1.is_foreach() else prev_node_2
            other_node = prev_node_2 if prev_node_1.is_foreach() else prev_node_1

            self.recursive_predecessors = foreach_node.recursive_predecessors
            self.recursive_predecessors.update(other_node.recursive_predecessors)

            self.name_to_node = foreach_node.name_to_node
            for name in other_node.get_names():
                self.name_to_node[name] = other_node

            self.group = (nodes[0].get_device(), 0)

            self.origins = set()

    def mark_run(self):
        raise NotImplementedError

    def codegen(self):
        self.node.get_store_function()(self.node.make_loader()())

    def can_free(self):
        # BUGFIX: previously `return NotImplementedError`, which handed the
        # exception *class* (a truthy object) back to the caller instead of
        # failing.  Raise, matching mark_run() above and
        # FusedSchedulerNode.can_free().
        raise NotImplementedError

    def is_foreach(self):
        return True

    def get_subkernel_nodes(self):
        """Returns a list of nodes which comprise the foreach kernel, operating on corresponding elements of our input lists.
        These nodes may be vertically fused."""
        return list(self.snodes)

    def get_nodes(self):
        """Returns all nodes contained in this kernel, unpacking fused nodes into their constituent scheduler nodes."""
        return list(itertools.chain(*[x.get_nodes() for x in self.snodes]))

    def get_first_name(self):
        return self.snodes[0].get_first_name()
|
| 895 |
+
|
| 896 |
+
|
| 897 |
+
def pick_loop_order(stride_lengths, sizes, priority_idx=()):
    """
    A heuristic to decide loop iteration orders. This has not been well
    tuned and may be something we should autotune.
    """

    @functools.cmp_to_key
    def index_cmp(a, b):
        # Size-1 dimensions carry no ordering information; push them last.
        if sizes[a] == 1 or sizes[b] == 1:
            return cmp(sizes[a] == 1, sizes[b] == 1)

        strides_a = [row[a] for row in stride_lengths]
        strides_b = [row[b] for row in stride_lengths]

        # Count, per buffer, which dimension looks like the better outer
        # loop.  Equivalent to
        # np.logical_or(stride_lengths[:, b] == 0, stride_lengths[:, a] < stride_lengths[:, b]).all()
        votes_a = sum(
            s_b == 0 or s_a < s_b for s_a, s_b in zip(strides_a, strides_b)
        )
        votes_b = sum(
            s_a == 0 or s_b < s_a for s_a, s_b in zip(strides_a, strides_b)
        )
        if votes_a != votes_b:
            return -1 if votes_a > votes_b else 1

        # Tie: fall back to treating the layout as contiguous.
        return cmp(b, a)

    order = list(reversed(range(len(stride_lengths[0]))))
    if len(priority_idx) > 0:
        # if we have priority node, only use that node's order
        stride_lengths = [stride_lengths[pi] for pi in priority_idx]
    if config.pick_loop_orders:
        order.sort(key=index_cmp)
    return order
|
| 935 |
+
|
| 936 |
+
|
| 937 |
+
@dataclasses.dataclass
class NodeUser:
    """Records one consumer of a scheduler node's output."""

    node: BaseSchedulerNode
    can_inplace: bool = False

    # A weak user must be scheduled after a given node, but doesn't actually
    # use the result
    is_weak: bool = False

    def get_name(self):
        """Name of the consuming node."""
        return self.node.get_name()

    def merge(self, other: "NodeUser") -> "NodeUser":
        """Combine two usage records of the same node into one.

        in-place eligibility and weakness survive only if both records
        agree on them.
        """
        assert self.node is other.node
        merged_inplace = self.can_inplace and other.can_inplace
        merged_weak = self.is_weak and other.is_weak
        return NodeUser(self.node, merged_inplace, merged_weak)
|
| 956 |
+
|
| 957 |
+
|
| 958 |
+
class Scheduler:
|
| 959 |
+
@dynamo_timed
|
| 960 |
+
def __init__(self, nodes):
|
| 961 |
+
super().__init__()
|
| 962 |
+
self.backends = {}
|
| 963 |
+
self.fuse_cache = {}
|
| 964 |
+
|
| 965 |
+
self.nodes = []
|
| 966 |
+
self.available_buffer_names = {
|
| 967 |
+
*V.graph.graph_inputs.keys(),
|
| 968 |
+
*V.graph.constants.keys(),
|
| 969 |
+
}
|
| 970 |
+
|
| 971 |
+
self.nodes = [self.create_scheduler_node(n) for n in nodes]
|
| 972 |
+
|
| 973 |
+
# some new constants could have been created above
|
| 974 |
+
self.available_buffer_names.update(V.graph.constants.keys())
|
| 975 |
+
for node in self.nodes:
|
| 976 |
+
node.prune_deps()
|
| 977 |
+
|
| 978 |
+
self.name_to_node = {n.get_name(): n for n in self.nodes}
|
| 979 |
+
self.name_to_fused_node = None # set in fuse_nods()
|
| 980 |
+
|
| 981 |
+
# we handle mutation by renaming modified versions of the same
|
| 982 |
+
# buffer in the dependency graph to prevent cycles.
|
| 983 |
+
# mutation_renames: tracks the current name for a given buffer
|
| 984 |
+
# (changed once per mutation)
|
| 985 |
+
self.mutation_real_name = {}
|
| 986 |
+
# mutation_real_name: maps back to the original name for codegen
|
| 987 |
+
self.mutation_renames = {}
|
| 988 |
+
|
| 989 |
+
self.compute_dependencies()
|
| 990 |
+
self.topological_sort_schedule()
|
| 991 |
+
self.dead_node_elimination()
|
| 992 |
+
self.compute_predecessors()
|
| 993 |
+
|
| 994 |
+
metrics.ir_nodes_pre_fusion += len(self.nodes)
|
| 995 |
+
V.debug.ir_pre_fusion(self.nodes)
|
| 996 |
+
self.num_orig_nodes = len(self.nodes)
|
| 997 |
+
self.name_to_fused_node = {n.get_name(): n for n in self.nodes}
|
| 998 |
+
self.create_foreach_nodes()
|
| 999 |
+
self.topological_sort_schedule()
|
| 1000 |
+
self.fuse_nodes()
|
| 1001 |
+
self.compute_last_usage()
|
| 1002 |
+
V.debug.ir_post_fusion(self.nodes)
|
| 1003 |
+
V.debug.graph_diagram(self.nodes)
|
| 1004 |
+
self.debug_draw_graph()
|
| 1005 |
+
|
| 1006 |
+
# used during codegen:
|
| 1007 |
+
self.current_device = None
|
| 1008 |
+
self.buffer_names_to_free = set()
|
| 1009 |
+
self.buffer_names_no_longer_needed = set()
|
| 1010 |
+
|
| 1011 |
+
# fx graph node to the position it appears in the graph
|
| 1012 |
+
# for debug attribution
|
| 1013 |
+
self.origin_to_index = {}
|
| 1014 |
+
|
| 1015 |
+
log.info("Number of scheduler nodes after fusion %d", len(self.nodes))
|
| 1016 |
+
|
| 1017 |
+
def debug_draw_graph(self):
|
| 1018 |
+
"""Generate an image of the graph for debugging"""
|
| 1019 |
+
if os.environ.get("INDUCTOR_WRITE_SCHEDULER_GRAPH", None) == "1":
|
| 1020 |
+
from .debug import draw_buffers
|
| 1021 |
+
|
| 1022 |
+
draw_buffers(self.nodes, print_graph=True)
|
| 1023 |
+
|
| 1024 |
+
def debug_print_nodes(self, label):
|
| 1025 |
+
if log.isEnabledFor(logging.INFO):
|
| 1026 |
+
log.info("%s:", label)
|
| 1027 |
+
for node in self.nodes:
|
| 1028 |
+
node.log_details()
|
| 1029 |
+
|
| 1030 |
+
def create_scheduler_node(self, node):
|
| 1031 |
+
assert (
|
| 1032 |
+
node.origins is not None
|
| 1033 |
+
), "All nodes passed to scheduling must have an origin"
|
| 1034 |
+
if node.is_no_op():
|
| 1035 |
+
return NopKernelSchedulerNode(self, node)
|
| 1036 |
+
elif isinstance(node, (ir.ComputedBuffer, ir.TemplateBuffer)):
|
| 1037 |
+
group_fn = self.get_backend(node.get_device()).group_fn
|
| 1038 |
+
return SchedulerNode(self, node, group_fn)
|
| 1039 |
+
elif isinstance(node, ir.ExternKernel):
|
| 1040 |
+
return ExternKernelSchedulerNode(self, node)
|
| 1041 |
+
else:
|
| 1042 |
+
raise NotImplementedError(node)
|
| 1043 |
+
|
| 1044 |
+
def create_foreach_nodes(self):
|
| 1045 |
+
removed_node_names = set()
|
| 1046 |
+
fe_nodes = []
|
| 1047 |
+
kept_node_names = self.name_to_fused_node.keys()
|
| 1048 |
+
|
| 1049 |
+
for names in V.graph.lists.values():
|
| 1050 |
+
removed_node_names.update(names)
|
| 1051 |
+
|
| 1052 |
+
names = [name for name in names if name in kept_node_names]
|
| 1053 |
+
if not names:
|
| 1054 |
+
# All nodes eliminated
|
| 1055 |
+
continue
|
| 1056 |
+
|
| 1057 |
+
snodes = [self.name_to_node[name] for name in names]
|
| 1058 |
+
fe_node = ForeachKernelSchedulerNode(self, snodes)
|
| 1059 |
+
|
| 1060 |
+
fe_nodes.append(fe_node)
|
| 1061 |
+
|
| 1062 |
+
for name in names:
|
| 1063 |
+
self.name_to_fused_node[name] = fe_node
|
| 1064 |
+
|
| 1065 |
+
self.nodes = [
|
| 1066 |
+
node for node in self.nodes if node.get_name() not in removed_node_names
|
| 1067 |
+
] + fe_nodes
|
| 1068 |
+
|
| 1069 |
+
def compute_dependencies(self):
|
| 1070 |
+
"""
|
| 1071 |
+
Create dependency edges between nodes, handling aliasing and
|
| 1072 |
+
mutation properly.
|
| 1073 |
+
"""
|
| 1074 |
+
name_to_users = collections.defaultdict(list)
|
| 1075 |
+
|
| 1076 |
+
# handle aliasing by using python aliasing in name_to_users
|
| 1077 |
+
# if foo aliases bar then we will make name_to_users["foo"] point
|
| 1078 |
+
# to the same python list as name_to_users["bar"]
|
| 1079 |
+
for node1 in self.nodes:
|
| 1080 |
+
node1_name = node1.get_name()
|
| 1081 |
+
for node2_name in node1.get_aliases():
|
| 1082 |
+
if node1_name in name_to_users and node2_name in name_to_users:
|
| 1083 |
+
# merge the two
|
| 1084 |
+
list1 = name_to_users[node1_name]
|
| 1085 |
+
list2 = name_to_users[node2_name]
|
| 1086 |
+
combined = list1 + list2
|
| 1087 |
+
for key in name_to_users.keys():
|
| 1088 |
+
if name_to_users[key] is list1 or name_to_users[key] is list2:
|
| 1089 |
+
name_to_users[key] = combined
|
| 1090 |
+
elif node1_name in name_to_users:
|
| 1091 |
+
name_to_users[node2_name] = name_to_users[node1_name]
|
| 1092 |
+
else:
|
| 1093 |
+
name_to_users[node1_name] = name_to_users[node2_name]
|
| 1094 |
+
|
| 1095 |
+
def rename(n):
|
| 1096 |
+
if n in self.mutation_renames:
|
| 1097 |
+
return rename(self.mutation_renames[n])
|
| 1098 |
+
return n
|
| 1099 |
+
|
| 1100 |
+
def dep_closure(node_name):
|
| 1101 |
+
reachable_names = {node_name}
|
| 1102 |
+
node = self.name_to_node[node_name]
|
| 1103 |
+
write_dep = list(node.read_writes.writes)[0]
|
| 1104 |
+
for read_dep in node.read_writes.reads:
|
| 1105 |
+
if (
|
| 1106 |
+
read_dep.name in self.name_to_node
|
| 1107 |
+
and isinstance(read_dep, dependencies.MemoryDep)
|
| 1108 |
+
and isinstance(write_dep, dependencies.MemoryDep)
|
| 1109 |
+
and read_dep.index == write_dep.index
|
| 1110 |
+
and read_dep.size == write_dep.size
|
| 1111 |
+
):
|
| 1112 |
+
reachable_names.update(dep_closure(read_dep.name))
|
| 1113 |
+
return reachable_names
|
| 1114 |
+
|
| 1115 |
+
def add_user(used_by_name, user_node, can_inplace=False, is_weak=False):
|
| 1116 |
+
name_to_users[rename(used_by_name)].append(
|
| 1117 |
+
NodeUser(user_node, can_inplace, is_weak)
|
| 1118 |
+
)
|
| 1119 |
+
|
| 1120 |
+
for node in self.nodes:
|
| 1121 |
+
# a node will mutate either 0 or 1 buffers
|
| 1122 |
+
for alt_name in node.get_mutations():
|
| 1123 |
+
alt_name = rename(alt_name)
|
| 1124 |
+
# this node must run after the prior writer
|
| 1125 |
+
add_user(alt_name, node)
|
| 1126 |
+
node.add_mutation_dep(StarDep(alt_name))
|
| 1127 |
+
for other_node in name_to_users[alt_name]:
|
| 1128 |
+
# this node must run after all prior readers
|
| 1129 |
+
other_name = rename(other_node.get_name())
|
| 1130 |
+
known_dep_node_names = dep_closure(node.get_name())
|
| 1131 |
+
if other_name not in known_dep_node_names:
|
| 1132 |
+
# If this node already directly or indirectly depends on other_node,
|
| 1133 |
+
# we don't need to insert an extra dep.
|
| 1134 |
+
node.add_mutation_dep(WeakDep(other_name))
|
| 1135 |
+
add_user(other_name, node, is_weak=True)
|
| 1136 |
+
|
| 1137 |
+
# add normal non-mutation dependencies
|
| 1138 |
+
for read in node.read_writes.reads:
|
| 1139 |
+
is_weak = isinstance(read, WeakDep)
|
| 1140 |
+
add_user(read.name, node, node.can_inplace(read), is_weak)
|
| 1141 |
+
|
| 1142 |
+
node.update_mutated_names(self.mutation_renames)
|
| 1143 |
+
|
| 1144 |
+
# update our renaming scheme for the next iteration
|
| 1145 |
+
for alt_name in node.get_mutations():
|
| 1146 |
+
self.mutation_renames[rename(alt_name)] = node.get_name()
|
| 1147 |
+
self.mutation_renames[alt_name] = node.get_name()
|
| 1148 |
+
self.mutation_real_name[node.get_name()] = self.mutation_real_name.get(
|
| 1149 |
+
alt_name, alt_name
|
| 1150 |
+
)
|
| 1151 |
+
|
| 1152 |
+
# make sure outputs aren't dead-code-eliminated
|
| 1153 |
+
for node_name in V.graph.get_output_names():
|
| 1154 |
+
add_user(node_name, OutputNode(StarDep(node_name)))
|
| 1155 |
+
|
| 1156 |
+
# make sure input mutation isn't dead-code-eliminated
|
| 1157 |
+
for name in self.mutation_renames:
|
| 1158 |
+
if name in V.graph.graph_inputs:
|
| 1159 |
+
add_user(name, OutputNode(StarDep(name)))
|
| 1160 |
+
V.graph.mutated_inputs.add(name)
|
| 1161 |
+
|
| 1162 |
+
inp_names = {
|
| 1163 |
+
name: index for index, name in enumerate(V.graph.graph_inputs.keys())
|
| 1164 |
+
}
|
| 1165 |
+
V.graph.mutated_input_idxs = [
|
| 1166 |
+
inp_names[name] for name in V.graph.mutated_inputs
|
| 1167 |
+
]
|
| 1168 |
+
|
| 1169 |
+
# copy users information onto the nodes
|
| 1170 |
+
for node in self.nodes:
|
| 1171 |
+
node.set_users(name_to_users[node.get_name()])
|
| 1172 |
+
|
| 1173 |
+
# populate inverse_users
|
| 1174 |
+
for node in self.nodes:
|
| 1175 |
+
for user in node.users:
|
| 1176 |
+
user.node.inverse_users.append(node)
|
| 1177 |
+
|
| 1178 |
+
def dead_node_elimination(self):
|
| 1179 |
+
"""
|
| 1180 |
+
Remove any nodes without users
|
| 1181 |
+
"""
|
| 1182 |
+
again = True # repeat until a fixed point
|
| 1183 |
+
while again:
|
| 1184 |
+
updated_nodes = []
|
| 1185 |
+
for node in self.nodes:
|
| 1186 |
+
|
| 1187 |
+
def can_eliminate_user(user: NodeUser):
|
| 1188 |
+
return user.is_weak or user.get_name() in V.graph.removed_buffers
|
| 1189 |
+
|
| 1190 |
+
can_eliminate = not node.has_side_effects() and all(
|
| 1191 |
+
can_eliminate_user(u) for u in node.users
|
| 1192 |
+
)
|
| 1193 |
+
|
| 1194 |
+
if not can_eliminate:
|
| 1195 |
+
updated_nodes.append(node)
|
| 1196 |
+
else:
|
| 1197 |
+
# dead code
|
| 1198 |
+
log.debug("removed dead node: %s", node.get_name())
|
| 1199 |
+
V.graph.removed_buffers.add(node.get_name())
|
| 1200 |
+
|
| 1201 |
+
again = len(self.nodes) > len(updated_nodes)
|
| 1202 |
+
self.nodes = updated_nodes
|
| 1203 |
+
|
| 1204 |
+
# Prune any WeakDeps no longer needed
|
| 1205 |
+
for node in self.nodes:
|
| 1206 |
+
node.prune_weak_deps()
|
| 1207 |
+
|
| 1208 |
+
def topological_sort_schedule(self):
|
| 1209 |
+
"""
|
| 1210 |
+
Ensure self.nodes is in topologically sorted order
|
| 1211 |
+
"""
|
| 1212 |
+
seen = set()
|
| 1213 |
+
name_to_node = dict()
|
| 1214 |
+
result = []
|
| 1215 |
+
|
| 1216 |
+
def visit(n):
|
| 1217 |
+
if n not in seen:
|
| 1218 |
+
seen.add(n)
|
| 1219 |
+
for dep in sorted(n.unmet_dependencies, key=lambda d: d.name):
|
| 1220 |
+
visit(name_to_node[dep.name])
|
| 1221 |
+
result.append(n)
|
| 1222 |
+
|
| 1223 |
+
for node in self.nodes:
|
| 1224 |
+
for name in node.get_names():
|
| 1225 |
+
name_to_node[name] = node
|
| 1226 |
+
for node in self.nodes:
|
| 1227 |
+
visit(node)
|
| 1228 |
+
self.nodes = result
|
| 1229 |
+
|
| 1230 |
+
def compute_predecessors(self):
|
| 1231 |
+
"""
|
| 1232 |
+
Populate each node.recursive_predecessors
|
| 1233 |
+
"""
|
| 1234 |
+
# note self.nodes is topologically sorted
|
| 1235 |
+
name_to_predecessors = {}
|
| 1236 |
+
for node in self.nodes:
|
| 1237 |
+
recursive_predecessors = set()
|
| 1238 |
+
for dep in node.unmet_dependencies:
|
| 1239 |
+
recursive_predecessors.add(dep.name)
|
| 1240 |
+
recursive_predecessors |= name_to_predecessors[dep.name]
|
| 1241 |
+
name_to_predecessors[node.get_name()] = recursive_predecessors
|
| 1242 |
+
node.recursive_predecessors = recursive_predecessors
|
| 1243 |
+
|
| 1244 |
+
for order, node in enumerate(self.nodes):
|
| 1245 |
+
node.min_order = order
|
| 1246 |
+
node.max_order = order
|
| 1247 |
+
|
| 1248 |
+
def fuse_nodes(self):
|
| 1249 |
+
"""
|
| 1250 |
+
Mutates self.nodes to combine nodes into FusedSchedulerNodes.
|
| 1251 |
+
"""
|
| 1252 |
+
for _ in range(10):
|
| 1253 |
+
old_len = len(self.nodes)
|
| 1254 |
+
self.fuse_nodes_once()
|
| 1255 |
+
if len(self.nodes) == old_len:
|
| 1256 |
+
break
|
| 1257 |
+
|
| 1258 |
+
def fuse_nodes_once(self):
|
| 1259 |
+
"""
|
| 1260 |
+
Mutates self.nodes to combine nodes into FusedSchedulerNodes.
|
| 1261 |
+
|
| 1262 |
+
This relies on two key functions to control the logic:
|
| 1263 |
+
- self.can_fuses(): checks if a fusion is legal
|
| 1264 |
+
- self.score_fusion(): assigns priority to a given fusion
|
| 1265 |
+
"""
|
| 1266 |
+
fused_nodes = set(self.nodes)
|
| 1267 |
+
for node1, node2 in self.get_possible_fusions():
|
| 1268 |
+
node1 = self.name_to_fused_node[node1.get_first_name()]
|
| 1269 |
+
node2 = self.name_to_fused_node[node2.get_first_name()]
|
| 1270 |
+
if self.can_fuse(node1, node2) and not self.will_fusion_create_cycle(
|
| 1271 |
+
node1, node2
|
| 1272 |
+
):
|
| 1273 |
+
node3 = fuse(node1, node2)
|
| 1274 |
+
fused_nodes.remove(node1)
|
| 1275 |
+
fused_nodes.remove(node2)
|
| 1276 |
+
fused_nodes.add(node3)
|
| 1277 |
+
self.name_to_fused_node.update(
|
| 1278 |
+
{n.get_name(): node3 for n in node3.get_nodes()}
|
| 1279 |
+
)
|
| 1280 |
+
self.nodes = sorted(fused_nodes, key=lambda x: x.min_order)
|
| 1281 |
+
self.topological_sort_schedule()
|
| 1282 |
+
self.prune_redundant_deps()
|
| 1283 |
+
|
| 1284 |
+
def prune_redundant_deps(self):
|
| 1285 |
+
for node in self.nodes:
|
| 1286 |
+
node.prune_redundant_deps(self.name_to_fused_node)
|
| 1287 |
+
|
| 1288 |
+
def get_possible_fusions(self):
|
| 1289 |
+
"""
|
| 1290 |
+
Helper to find all legal fusion opportunities, sorted by self.score_fusion()
|
| 1291 |
+
"""
|
| 1292 |
+
possible_fusions = []
|
| 1293 |
+
seen = set()
|
| 1294 |
+
|
| 1295 |
+
def check_all_pairs(nodes):
|
| 1296 |
+
for node1_index, node1 in enumerate(nodes):
|
| 1297 |
+
for node2 in nodes[node1_index + 1 :]:
|
| 1298 |
+
key = (node1, node2)
|
| 1299 |
+
if key in seen:
|
| 1300 |
+
continue
|
| 1301 |
+
seen.add(key)
|
| 1302 |
+
|
| 1303 |
+
if self.can_fuse(node1, node2):
|
| 1304 |
+
possible_fusions.append(key)
|
| 1305 |
+
elif (node2.is_template() or node2.is_foreach()) and self.can_fuse(
|
| 1306 |
+
node2, node1
|
| 1307 |
+
):
|
| 1308 |
+
# foreach fusions and epilogue fusions are order dependent
|
| 1309 |
+
possible_fusions.append((node2, node1))
|
| 1310 |
+
|
| 1311 |
+
buffer_names_grouping = collections.defaultdict(list)
|
| 1312 |
+
for node in self.nodes:
|
| 1313 |
+
for buf in node.used_buffer_names():
|
| 1314 |
+
buffer_names_grouping[buf].append(node)
|
| 1315 |
+
for node_grouping in buffer_names_grouping.values():
|
| 1316 |
+
check_all_pairs(node_grouping)
|
| 1317 |
+
|
| 1318 |
+
if config.aggressive_fusion:
|
| 1319 |
+
group_grouping = collections.defaultdict(list)
|
| 1320 |
+
for node in self.nodes:
|
| 1321 |
+
group = getattr(node, "group", None)
|
| 1322 |
+
if group:
|
| 1323 |
+
group_grouping[group].append(node)
|
| 1324 |
+
for node_grouping in group_grouping.values():
|
| 1325 |
+
check_all_pairs(node_grouping)
|
| 1326 |
+
|
| 1327 |
+
return sorted(possible_fusions, key=self.score_fusion_key, reverse=True)
|
| 1328 |
+
|
| 1329 |
+
def will_fusion_create_cycle(self, node1, node2):
    """Finds whether there's a path from src to dst caused indirectly by fusion"""
    visited = set()
    combined_names = node1.get_names() | node2.get_names()
    combined_predecessors = (
        node1.recursive_predecessors | node2.recursive_predecessors
    ) - combined_names

    def reaches_fusion(node):
        # Only fused nodes can introduce an indirect path; visit each at most once.
        if not isinstance(node, FusedSchedulerNode) or node in visited:
            return False
        visited.add(node)
        if combined_names & node.recursive_predecessors:
            # This predecessor transitively depends on one of the fused outputs.
            return True
        if node.get_names().issubset(combined_predecessors):
            # Everything this node produces is already a known predecessor:
            # following it cannot reveal a new path back into the fusion.
            return False
        return any(
            reaches_fusion(self.name_to_fused_node[name])
            for name in node.recursive_predecessors - combined_predecessors
        )

    return any(reaches_fusion(self.name_to_fused_node[n]) for n in combined_predecessors)
+
def can_fusion_increase_peak_memory(
    self, node1: BaseSchedulerNode, node2: BaseSchedulerNode
):
    """
    This function prevents fusion for nodes that can increase memory
    footprint. This problem is more common in horizontal fusion, where nodes
    that are far apart in the original order get fused, lengthening the live
    intervals of tensors. This is very evident in models with activation
    checkpointing, where the recomputed nodes from different checkpointed
    regions get fused and significantly increase the memory footprint.

    The current attempt is a quick, possibly hacky, heuristic to prevent the
    fusion of nodes that are far away in the original order.

    A better but difficult to implement heursitic would be to use live
    intervals of the buffers, find region of peak pressure in the original
    program and prevent fusion that crosses that peak region. We might need
    special care or good approximation in this implementation, as fusion of
    node changes live intervals, and re-computing live intervals and peak
    memory after each fusion can introduce large compilation overhead.
    """
    # Distance between the two nodes in the original schedule order; the
    # 64 cutoff is a heuristic threshold, not a correctness requirement.
    distance = max(
        abs(node1.min_order - node2.max_order),
        abs(node2.min_order - node1.max_order),
    )
    return distance > 64
+
def can_fuse(self, node1: BaseSchedulerNode, node2: BaseSchedulerNode):
    """
    Determine if it is possible to combine node1 and node2 into a
    single fused node.
    """

    # A node can never fuse with itself.
    if node1 is node2:
        return False
    # Extern/nop nodes are opaque to codegen and cannot fuse
    # (unless they are templates, which have epilogue support).
    if (
        isinstance(node1, (ExternKernelSchedulerNode, NopKernelSchedulerNode))
        and not node1.is_template()
    ):
        return False
    if (
        isinstance(node2, (ExternKernelSchedulerNode, NopKernelSchedulerNode))
        and not node2.is_template()
    ):
        return False

    # Foreach kernels have their own fusion rules.
    if node1.is_foreach() or node2.is_foreach():
        return ForeachKernelSchedulerNode.can_fuse(node1, node2)

    if node2.get_names() & node1.recursive_predecessors:
        return False  # node2 must go before node1

    # Templates can only fuse epilogues onto themselves (node1 position).
    if node2.is_template():
        return False  # only epilogues
    if node1.is_template() and (
        node2.has_aliasing_or_mutation()
        or node2.is_reduction()
        or not config.epilogue_fusion
    ):
        return False

    device = node1.get_device()
    if device != node2.get_device():
        return False  # wrong device

    # Without shared memory traffic, fusing is only worthwhile under
    # aggressive_fusion, and never across reductions.
    no_shared_data = self.score_fusion_memory(node1, node2) == 0
    if no_shared_data and (
        not config.aggressive_fusion or node1.is_reduction() or node2.is_reduction()
    ):
        return False  # heuristic not needed for correctness

    # Cap the size of fused kernels to keep compile time in check.
    if (
        not node1.is_foreach()
        and not node2.is_foreach()
        and len(node1.get_nodes()) + len(node2.get_nodes()) > config.max_fusion_size
    ):
        return False  # heuristic not needed for correctness

    if node1.get_names() & node2.recursive_predecessors:
        # node2 depends on node1 outputs
        if not self.can_fuse_vertical(node1, node2):
            return False
        # Scheduler-level legality passed; defer to the device backend.
        return self.get_backend(device).can_fuse_vertical(node1, node2)
    else:  # nodes don't depend on each other, but may have common reads
        if self.can_fusion_increase_peak_memory(node1, node2):
            return False
        return self.get_backend(device).can_fuse_horizontal(node1, node2)
+
def can_fuse_vertical(self, node1, node2):
    """
    Check if it is legal to fuse a consumer (node2) into a producer (node1).

    We can fuse them if all the reads of node2 either match
    corresponding writes in node1, or are written by nodes that can
    be scheduled before the fusion of node1 and node2.
    """
    node1_names = node1.get_names()

    def matches(read, write):
        # StarDep doesn't match MemoryDep, different indices don't match
        # However, broadcasting sometimes strips dimensions, and if that's the case
        # we still can match unmet dep
        # if there's indirect indexing, don't match it
        return (
            read.name == write.name
            and type(read) == type(write)
            and not free_symbol_has(read.index, "tmp")
            and not free_symbol_has(write.index, "tmp")
            and read.index == write.index
            and len(read.size) >= len(write.size)
            and read.size[: len(write.size)] == write.size
        )

    satisfied_deps = {
        read
        for read in node2.unmet_dependencies
        for write in node1.read_writes.writes
        if matches(read, write)
    }

    remaining_names = {dep.name for dep in node2.unmet_dependencies - satisfied_deps}
    if remaining_names & node1_names:
        # MemoryDeps didn't match and read different locations of the same buffer.
        # Examples here include:
        #   - MemoryDep("foo", x) != MemoryDep("foo", x + 1)
        #   - MemoryDep("foo", x) != StarDep("foo")
        return False
    # Remaining reads are legal only if their producers don't themselves
    # depend on node1 (which would force node1 both before and after them).
    return all(
        not (node1_names & self.name_to_fused_node[name].recursive_predecessors)
        for name in remaining_names
    )
+
def score_fusion(self, node1: BaseSchedulerNode, node2: BaseSchedulerNode):
    """
    Assign a score (higher comes first) to the fusion of node1
    and node2. When different fusions conflict with each other,
    this is the way we decide what order to run them in.

    Our current score is based on:
    - Estimate of the saved memory operations
    - Fusions closer together in original order
    """
    saved_bytes = self.score_fusion_memory(node1, node2)
    # Negated distance in original order, so closer pairs score higher.
    closeness = -max(
        abs(node1.min_order - node2.max_order),
        abs(node2.min_order - node1.max_order),
    )
    template_priority = (
        node1.is_template() == config.epilogue_fusion_first and saved_bytes > 0
    )
    same_kind = node1.is_reduction() == node2.is_reduction() and saved_bytes > 0
    return (template_priority, same_kind, saved_bytes, closeness)
+
def score_fusion_memory(self, node1, node2):
    """
    The first term in our fusion score that estimates number of saved memory operations.
    """
    accessed_by_node1 = node1.read_writes.reads | node1.read_writes.writes
    accessed_by_node2 = node2.read_writes.reads | node2.read_writes.writes
    return sum(dep.numbytes_hint() for dep in accessed_by_node1 & accessed_by_node2)
| 1518 |
+
def score_fusion_key(self, nodes):
    """
    Shim for list.sort(key=...)
    """
    first, second = nodes
    return self.score_fusion(first, second)
| 1525 |
+
def compute_last_usage(self):
    """
    Populate node.last_usage recursively (also for the nodes within a FusedSchedulerNode)
    """
    # Walk the schedule backwards, tracking which buffers are still needed
    # later; graph outputs are always live at the end.
    live_after = set(V.graph.get_output_names())
    for node in reversed(self.nodes):
        node.set_last_usage(live_after, self.mutation_real_name)
        live_after.update(node.last_usage)
| 1538 |
+
def free_buffers(self):
    """Free any buffers that are no longer needed"""
    freeable = (
        self.buffer_names_to_free
        - V.graph.removed_buffers
        - V.graph.wrapper_code.freed
    )
    # Sorted for deterministic codegen output.
    for name in sorted(freeable):
        if name in self.name_to_node:
            scheduler_node = self.name_to_node[name]
            if scheduler_node.can_free():
                V.graph.wrapper_code.codegen_free(scheduler_node.node)
        elif name in V.graph.graph_inputs:
            storage = V.graph.graph_inputs[name].data
            assert storage.is_input_buffer()
            V.graph.wrapper_code.codegen_free(storage.data)

    self.buffer_names_to_free.clear()
| 1556 |
+
def remove_kernel_local_buffers(self):
    """
    Any buffers that are both created and have a last use in the
    same kernel can be removed.
    """

    # Buffers written by the current kernel whose last use has passed.
    names_to_remove = (
        V.kernel.store_buffer_names & self.buffer_names_no_longer_needed
    )

    def remove_filter(n):
        # Keep buffers that must survive the kernel: explicitly pinned,
        # consumed as inputs, or involved in mutation renaming.
        return (
            n not in V.kernel.must_keep_buffers
            and n not in V.kernel.args.input_buffers
            and n not in self.mutation_renames
            and n not in self.mutation_real_name
        )

    names_to_remove = list(filter(remove_filter, names_to_remove))

    for name in names_to_remove:
        if name in V.kernel.args.inplace_buffers:
            buf = V.kernel.args.inplace_buffers[name]
            # Already removed entries are stored as "REMOVED*" strings.
            if isinstance(buf, str) and buf.startswith("REMOVED"):
                continue
            # An inplace buffer can only go away if every alias of it is
            # also being removed in this kernel.
            remove = all(n in names_to_remove for n in buf.other_names)
            if remove:
                self.remove_inplace_buffer(name)
            V.graph.inplaced_to_remove.add(name)
        else:
            self.remove_buffer(name)
| 1588 |
+
def remove_buffer(self, name):
    """Mark output buffer ``name`` as removed from the current kernel and graph."""
    # Assign a special value instead of deleting the entry
    # because we still rely on output_buffers's length to
    # generate unique arg name.
    log.debug("remove_buffer(%r)", name)
    V.kernel.args.output_buffers[name] = "REMOVED"
    V.graph.removed_buffers.add(name)
| 1596 |
+
def remove_inplace_buffer(self, name):
    """Mark inplace (in/out) buffer ``name`` as removed, mirroring remove_buffer()."""
    # Fix: log message previously said "removing_inplace_buffer", which didn't
    # match the method name or the "remove_buffer(%r)" style of its sibling.
    log.debug("remove_inplace_buffer(%r)", name)
    inner_name = V.kernel.args.inplace_buffers[name].inner_name
    # Keep the entry (renamed) rather than deleting it — presumably for the
    # same arg-name-uniqueness reason documented in remove_buffer().
    V.kernel.args.inplace_buffers[name] = inner_name.replace(
        "in_out_ptr", "REMOVED"
    )
    V.graph.removed_buffers.add(name)
| 1604 |
+
def flush(self):
    """Flush every device backend's pending kernels, then free dead buffers."""
    for device_backend in self.backends.values():
        device_backend.flush()
    self.free_buffers()
| 1609 |
+
def codegen_extern_call(self, scheduler_node: ExternKernelSchedulerNode):
    """Allocate the extern kernel's output, emit its call, then free dead buffers."""
    assert isinstance(scheduler_node, ExternKernelSchedulerNode)
    scheduler_node.allocate()
    scheduler_node.node.codegen(V.graph.wrapper_code)
    self.free_buffers()
| 1616 |
+
def create_backend(self, device: torch.device):
    """Instantiate the codegen backend for ``device``, validating triton availability for CUDA."""
    # CUDA devices must carry an explicit index by this point.
    assert (
        device.type != "cuda" or device.index is not None
    ), f"{device} should have been normalized in lowering"
    V.graph.device_types.add(device.type)
    V.graph.add_device_idx(device.index)

    device_scheduling = get_scheduling_for_device(device.type)
    if device_scheduling is None:
        raise RuntimeError(f"Unsupported device type: {device.type}")

    # CUDA codegen requires triton; give a targeted error for old GPUs
    # (compute capability < 7.0) vs. a missing/broken installation.
    if device.type == "cuda" and not has_triton():
        device_props = torch.cuda.get_device_properties(device)
        if device_props.major < 7:
            raise RuntimeError(
                f"Found {device_props.name} which is too old to be supported by the triton GPU compiler, which is used as the backend. Triton only supports devices of CUDA Capability >= 7.0, but your device is of CUDA capability {device_props.major}.{device_props.minor}"  # noqa: B950
            )
        else:
            raise RuntimeError(
                "Cannot find a working triton installation. More information on installing Triton can be found at https://github.com/openai/triton"  # noqa: B950
            )

    return device_scheduling(self)
| 1640 |
+
def get_backend(self, device: torch.device):
    """Return the per-device codegen backend, creating it on first use."""
    try:
        return self.backends[device]
    except KeyError:
        backend = self.create_backend(device)
        self.backends[device] = backend
        return backend
| 1645 |
+
def enter_context(self, node):
    """Enter the wrapper-code context of the latest-ordered FX origin of ``node``."""

    def order_of(fx_node):
        # Lazily index the fx graph's nodes so repeated lookups are O(1).
        if fx_node not in self.origin_to_index:
            self.origin_to_index.update(
                {n: i for i, n in enumerate(fx_node.graph.nodes)}
            )
        return self.origin_to_index[fx_node]

    origins = [
        (order_of(origin), origin)
        for inner in node.get_nodes()
        for origin in inner.node.origins
    ]
    if origins:
        _, latest = max(origins)
        V.graph.wrapper_code.enter_context(latest)
| 1656 |
+
@dynamo_timed
def codegen(self):
    """Walk the final schedule and emit wrapper/kernel code for every node."""
    for node in self.nodes:
        self.enter_context(node)
        self.buffer_names_no_longer_needed.update(node.last_usage)

        if not isinstance(node, NopKernelSchedulerNode):
            device = node.get_device()
            # Flush pending fused kernels before any device change,
            # extern call, or template kernel.
            if (
                device != self.current_device
                or node.is_extern()
                or node.is_template()
            ):
                self.flush()
            if device != self.current_device:
                # Emit CUDA device-guard enter/exit around device switches.
                if device.type == "cuda":
                    if self.current_device and self.current_device.type == "cuda":
                        V.graph.wrapper_code.codegen_device_guard_exit()
                    assert device.index is not None, "device should have an index"
                    V.graph.wrapper_code.codegen_device_guard_enter(device.index)
                elif self.current_device and self.current_device.type == "cuda":
                    V.graph.wrapper_code.codegen_device_guard_exit()
                self.current_device = device

        self.buffer_names_to_free.update(node.last_usage)

        if node.is_template():
            # Template nodes carry their epilogue fusions as trailing nodes.
            node, *epilogue = node.get_nodes()
            self.get_backend(device).codegen_template(node, epilogue)
        elif node.is_extern():
            self.codegen_extern_call(node)
        elif node.is_foreach():
            self.get_backend(device).codegen_foreach(node)
        elif isinstance(node, (FusedSchedulerNode, SchedulerNode)):
            self.get_backend(device).codegen_nodes(node.get_nodes())
        else:
            assert isinstance(node, NopKernelSchedulerNode)
            node.allocate()

        # NOTE(review): for a NopKernelSchedulerNode, `device` is unbound
        # here; this relies on debug_sync_kernel being off in that path.
        if config.triton.debug_sync_kernel:
            self.get_backend(device).codegen_sync()

        self.available_buffer_names.update(node.get_names())

    self.flush()
|
| 1703 |
+
class BaseScheduling:
    """Abstract interface a per-device codegen backend must implement.

    Instances are created by Scheduler.create_backend() and queried for
    fusion legality and asked to emit kernels for scheduled nodes.
    """

    def can_fuse_vertical(self, node1: BaseSchedulerNode, node2: BaseSchedulerNode):
        """
        Check whether node1 and node2 can be vertically fused or not.
        """
        raise NotImplementedError()

    def can_fuse_horizontal(self, node1: BaseSchedulerNode, node2: BaseSchedulerNode):
        """
        Check whether node1 and node2 can be horizontally fused or not.
        """
        raise NotImplementedError()

    def group_fn(self, sizes):
        """
        Process the iteration sizes in case a transformation needs to be applied.
        """
        raise NotImplementedError()

    def codegen_template(
        self, template_node: BaseSchedulerNode, epilogue_nodes: List[BaseSchedulerNode]
    ):
        """
        Given a template node, generate a kernel.

        This function is only available for triton now. If the third-party backend behaves as a sub-class
        of TritonScheduling, it can override it or reuse it.
        """
        raise NotImplementedError()

    def codegen_nodes(self, nodes: List[BaseSchedulerNode]):
        """
        Generate a kernel given a list of pre-fused nodes.
        """
        raise NotImplementedError()

    def codegen_sync(self):
        """
        Generate synchronization code for the kernel. This method depends on the hardware characteristics.
        """
        raise NotImplementedError()

    def flush(self):
        """
        Flush the generated kernel and python wrapper code to the source code file.
        """
        raise NotImplementedError()
|
llava_next/lib/python3.10/site-packages/torch/_inductor/select_algorithm.py
ADDED
|
@@ -0,0 +1,974 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import builtins
|
| 2 |
+
import functools
|
| 3 |
+
import inspect
|
| 4 |
+
import itertools
|
| 5 |
+
import logging
|
| 6 |
+
import sys
|
| 7 |
+
import textwrap
|
| 8 |
+
import time
|
| 9 |
+
from io import StringIO
|
| 10 |
+
|
| 11 |
+
from typing import Any, Dict, List, Type, Union
|
| 12 |
+
from unittest.mock import patch
|
| 13 |
+
|
| 14 |
+
import sympy
|
| 15 |
+
|
| 16 |
+
import torch
|
| 17 |
+
from torch._dynamo.testing import rand_strided
|
| 18 |
+
from torch._dynamo.utils import counters, identity
|
| 19 |
+
|
| 20 |
+
from . import config, ir
|
| 21 |
+
from .autotune_process import BenchmarkRequest, TensorMeta
|
| 22 |
+
from .codecache import code_hash, PersistentCache, PyCodeCache
|
| 23 |
+
|
| 24 |
+
from .codegen.common import IndentedBuffer
|
| 25 |
+
from .codegen.triton import texpr, TritonKernel, TritonPrinter, TritonScheduling
|
| 26 |
+
|
| 27 |
+
from .codegen.triton_utils import config_of, signature_to_meta
|
| 28 |
+
|
| 29 |
+
from .utils import do_bench, sympy_dot, sympy_product, unique
|
| 30 |
+
from .virtualized import V
|
| 31 |
+
|
| 32 |
+
# Module-level logger for this file.
log = logging.getLogger(__name__)

# correctness checks struggle with fp16/tf32
VERIFY: Dict[str, Any] = dict()
# NOTE(review): appears to gate printing of autotuning results — confirm at use sites.
PRINT_AUTOTUNE: bool = True
# Extra debugging output toggle for this module.
DEBUG: bool = False
|
| 39 |
+
|
| 40 |
+
class KernelNamespace:
    """Empty attribute container; attributes are attached dynamically at runtime."""

    pass


# these objects are imported from the generated wrapper code
extern_kernels = KernelNamespace()
|
| 48 |
+
class PartialRender:
    """
    Deferred template rendering: the initial render leaves placeholder
    keys in the output text, and ``finalize`` substitutes each placeholder
    with the result of its replacement hook. This allows doing a bunch of
    replacements after the initial render.
    """

    def __init__(self, code, replacement_hooks):
        super().__init__()
        self.code = code
        self.replacement_hooks = replacement_hooks

    def finalize(self):
        """Run every replacement hook and return the fully rendered code."""
        rendered = self.code
        assert rendered is not None, "can only be called once"
        # Consume the template so a second finalize() trips the assert above.
        self.code = None
        for placeholder, hook in self.replacement_hooks.items():
            rendered = rendered.replace(placeholder, hook())
        return rendered
| 68 |
+
|
| 69 |
+
class TritonTemplateKernel(TritonKernel):
|
| 70 |
+
def __init__(
    self,
    kernel_name,
    input_nodes,
    output_node,
    defines,
    num_stages,
    num_warps,
    grid_fn,
    meta,
    call_sizes,
    use_jit=True,
    prefix_args=0,
    suffix_args=0,
    epilogue_fn=identity,
    *,
    index_dtype,
):
    """Build a template kernel over a flattened 1-D iteration space.

    The parent TritonKernel is initialized with the product of the output
    node's sizes as the single iteration range (reduction range fixed to 1).
    ``prefix_args``/``suffix_args`` count input nodes reserved for fixed
    epilogues at the head/tail of ``input_nodes``.
    """
    super().__init__(
        sympy_product(output_node.get_size()),
        sympy.Integer(1),
        index_dtype=index_dtype,
    )
    self.input_nodes = input_nodes
    self.output_node = output_node
    # Filled in by def_kernel(): maps template arg name -> input node.
    self.named_input_nodes = {}
    self.defines = defines
    self.kernel_name = kernel_name
    # Set once by store_output(); doubles as a "store already emitted" flag.
    self.template_mask = None
    self.use_jit = use_jit
    self.num_stages = num_stages
    self.num_warps = num_warps
    self.grid_fn = grid_fn
    self.meta = meta
    self.call_sizes = call_sizes
    # for templates with fixed epilogues
    self.prefix_args = prefix_args
    self.suffix_args = suffix_args
    self.epilogue_fn = epilogue_fn
    # Placeholder -> callable; consumed later by PartialRender.finalize().
    self.render_hooks = dict()
+
def jit_line(self):
    """Decorator line(s) for the generated kernel: plain ``@triton.jit``
    when use_jit is set, otherwise the autotuning ``@template`` wrapper
    stacked on top of ``@triton.jit``."""
    if self.use_jit:
        return "@triton.jit"

    _, _, signature = self.args.python_argdefs()
    current_device = V.graph.scheduler.current_device
    # Key order matters: it is reproduced verbatim via repr() below.
    triton_meta = {
        "signature": signature_to_meta(signature, size_dtype=self.index_dtype),
        "device": current_device.index,
        "device_type": current_device.type,
        "constants": {},
        "configs": [config_of(signature)],
    }
    template_line = (
        f"@template(num_stages={self.num_stages}, "
        f"num_warps={self.num_warps}, meta={triton_meta!r})"
    )
    return template_line + "\n@triton.jit"
+
def def_kernel(self, *argnames):
    """
    Hook called from template code to generate function def and
    needed args.

    Returns the placeholder string "<DEF_KERNEL>", replaced at
    finalize-time by the registered hook once all args are known.
    """
    assert all(isinstance(x, str) for x in argnames)
    renames = IndentedBuffer(initial_indent=1)

    # Inputs outside the prefix/suffix window are the template's named args.
    named_args = self.input_nodes[
        self.prefix_args : len(self.input_nodes) - self.suffix_args
    ]

    assert len(argnames) == len(named_args), (
        len(argnames),
        len(named_args),
        self.prefix_args,
        len(self.input_nodes),
    )

    for input_node in self.input_nodes[: self.prefix_args]:
        # get args in correct order
        self.args.input(input_node.get_name())

    for name, input_node in zip(argnames, named_args):
        arg_name = f"arg_{name}"
        self.named_input_nodes[name] = input_node
        self.args.input_buffers[input_node.get_name()] = arg_name

    # The args may be duplicated, so renaming must be after args are de-duplicated.
    for name in argnames:
        input_node = self.named_input_nodes[name]
        arg_name = self.args.input_buffers[input_node.get_name()]
        if input_node.get_layout().offset == 0:
            renames.writeline(f"{name} = {arg_name}")
        else:
            # Bake a non-zero storage offset into the renamed pointer.
            offset = texpr(self.rename_indexing(input_node.get_layout().offset))
            renames.writeline(f"{name} = {arg_name} + {offset}")

    for input_node in self.input_nodes[len(self.input_nodes) - self.suffix_args :]:
        # get args in correct order
        self.args.input(input_node.get_name())

    def hook():
        # python_argdefs() cannot be run until after the rest of the template lazily adds more args
        arg_defs, *_ = self.args.python_argdefs()
        return "\n".join(
            [
                "import triton.language as tl",
                "import triton",
                "from torch._inductor.triton_heuristics import template",
                "from torch._inductor.utils import instance_descriptor",
                "from torch._inductor import triton_helpers",
                "",
                self.jit_line(),
                f"def {self.kernel_name}({', '.join(arg_defs)}):",
                self.defines,
                renames.getvalue(),
            ]
        )

    assert "<DEF_KERNEL>" not in self.render_hooks
    self.render_hooks["<DEF_KERNEL>"] = hook
    return "<DEF_KERNEL>"
| 192 |
+
def size(self, name: str, index: int):
    """
    Hook called from template code to get the size of an arg.
    Will add needed args to pass it in if it is dynamic.
    """
    assert isinstance(index, int)
    # A None name refers to the output node; otherwise look up the named input.
    if name is None:
        node = self.output_node
    else:
        assert isinstance(name, str)
        node = self.named_input_nodes[name]
    return texpr(self.rename_indexing(node.get_size()[index]))
| 205 |
+
def stride(self, name, index):
    """
    Hook called from template code to get the stride of an arg.
    Will add needed args to pass it in if it is dynamic.
    """
    assert isinstance(index, int)
    # A None name refers to the output node; otherwise look up the named input.
    if name is None:
        node = self.output_node
    else:
        assert isinstance(name, str)
        node = self.named_input_nodes[name]
    return texpr(self.rename_indexing(node.get_stride()[index]))
| 218 |
+
def store_output(self, indices, val, mask):
    """
    Hook called from template code to store the final output
    (if the buffer hasn't been optimized away), then append any
    epilogue fusions.

    Args (all come from the rendered template):
        indices: per-dimension index expressions (strings/exprs)
        val: name of the triton value holding the result
        mask: name of the triton mask guarding the store

    Returns the placeholder string "<STORE_OUTPUT>", which is replaced
    by the rendered body when the registered hook runs at finalize time.
    """
    assert isinstance(indices, (list, tuple))
    assert isinstance(val, str)
    assert isinstance(mask, str)
    # this hook may only run once per kernel
    assert self.template_mask is None
    indices = list(map(TritonPrinter.paren, indices))
    index_symbols = [sympy.Symbol(x) for x in indices]
    lengths = [V.graph.sizevars.simplify(s) for s in self.output_node.get_size()]
    assert len(indices) == len(lengths)

    # glue to make generated code use same indexing from template
    for name, range_tree_entry in zip(
        indices, self.range_trees[0].construct_entries(lengths)
    ):
        range_tree_entry.set_name(name)
    # flattened contiguous index over the output shape
    contiguous_index = sympy_dot(
        ir.FlexibleLayout.contiguous_strides(lengths), index_symbols
    )
    contiguous_index = self.rename_indexing(contiguous_index)
    self.body.writeline("xindex = " + texpr(contiguous_index))
    self.range_trees[0].lookup(sympy.Integer(1), sympy_product(lengths)).set_name(
        "xindex"
    )
    # record the template's mask/indices so epilogue indexing() reuses them
    self.template_mask = mask
    self.template_indices = indices
    output_index = self.output_node.get_layout().make_indexer()(index_symbols)
    output_index = self.rename_indexing(output_index)
    if output_index == contiguous_index:
        # reuse the already-emitted xindex instead of repeating the expression
        output_index = sympy.Symbol("xindex")

    # extra prefix/suffix inputs are loaded and handed to the epilogue fn
    epilogue_args = [val]
    for input_node in itertools.chain(
        self.input_nodes[: self.prefix_args],
        self.input_nodes[len(self.input_nodes) - self.suffix_args :],
    ):
        input_node.freeze_layout()
        epilogue_args.append(input_node.make_loader()(index_symbols))

    V.ops.store(  # type: ignore[attr-defined]
        self.output_node.get_name(),
        output_index,
        self.epilogue_fn(*epilogue_args),
    )
    self.codegen_body()

    def hook():
        # more stuff might have been added since the codegen_body above
        self.codegen_body()
        return textwrap.indent(self.body.getvalue(), " ").strip()

    assert "<STORE_OUTPUT>" not in self.render_hooks
    self.render_hooks["<STORE_OUTPUT>"] = hook
    return "<STORE_OUTPUT>"
|
| 276 |
+
|
| 277 |
+
def render(self, template, kwargs):
    """Render the jinja2 *template* with the hook namespace plus *kwargs*,
    wrapping the result in a PartialRender so placeholder hooks
    (e.g. <DEF_KERNEL>, <STORE_OUTPUT>) can be finalized later."""
    rendered = template.render(**self.template_env(), **kwargs)
    return PartialRender(rendered, self.render_hooks)
|
| 282 |
+
|
| 283 |
+
def make_load(self, name, indices, mask):
    """Optional template helper: build a ``tl.load`` expression for input
    *name* addressed by one index expression per dimension, guarded by
    *mask*.  Dynamic strides are renamed into kernel args."""
    assert isinstance(name, str)
    assert isinstance(indices, (list, tuple))
    assert isinstance(mask, str)
    strides = self.named_input_nodes[name].get_stride()
    index_strs = [TritonPrinter.paren(i) for i in indices]
    assert len(index_strs) == len(strides)
    terms = [
        f"{texpr(self.rename_indexing(s))} * {i}"
        for s, i in zip(strides, index_strs)
    ]
    offset = " + ".join(terms)
    return f"tl.load({name} + ({offset}), {mask})"
|
| 298 |
+
|
| 299 |
+
def template_env(self):
    """Return the namespace of hook functions visible inside the jinja2
    template, keyed by function name."""
    hooks = (
        self.def_kernel,
        self.size,
        self.stride,
        self.store_output,
        self.make_load,
    )
    return {hook.__name__: hook for hook in hooks}
|
| 313 |
+
|
| 314 |
+
def indexing(
    self,
    index: sympy.Expr,
    *,
    copy_shape=None,
    dense_indexing=False,
    override_mask=None,
):
    """
    Override the default indexing to use our custom mask and force
    dense indexing.

    NOTE(review): the caller-supplied ``copy_shape``, ``dense_indexing``
    and ``override_mask`` are ignored on purpose — epilogue loads/stores
    must line up with the template's own mask (``self.template_mask``),
    which is substituted for both the shape source and the mask below.
    """
    result, *mask = super().indexing(
        index,
        dense_indexing=False,
        copy_shape=self.template_mask,
        override_mask=self.template_mask,
    )
    return (result, *mask)
|
| 333 |
+
|
| 334 |
+
def initialize_range_tree(self, pid_cache):
    """Set up range trees as a normal triton kernel would, then throw
    away the default index codegen — the template emits its own."""
    super().initialize_range_tree(pid_cache)
    for buf in (self.body, self.indexing_code):
        buf.clear()  # ignore default codegen
|
| 339 |
+
|
| 340 |
+
def call_kernel(self, name: str):
    """Emit the host-side wrapper code that launches this template kernel
    (either cpp-wrapper or python wrapper form)."""
    wrapper = V.graph.wrapper_code
    _, call_args, _ = self.args.python_argdefs()
    call_args = [str(a) for a in call_args]

    for i in range(len(call_args)):
        # unspec args arrive as 0-dim tensors; unwrap to a python scalar
        if V.graph.is_unspec_arg(call_args[i]):
            call_args[i] = call_args[i] + ".item()"
        # NOTE(review): call_args[i] was already converted to str above,
        # so this isinstance can never be true — dead branch; confirm
        # whether the texpr conversion was meant to happen before str().
        if isinstance(call_args[i], sympy.Symbol):
            call_args[i] = texpr(call_args[i])

    if V.graph.cpp_wrapper:
        wrapper.generate_kernel_call(
            name,
            call_args,
            device_index=V.graph.scheduler.current_device.index,
        )
    else:
        call_args = ", ".join(call_args)  # type: ignore[assignment]
        stream_name = wrapper.write_get_cuda_stream(
            V.graph.scheduler.current_device.index
        )

        # the grid fn is referenced by module path in generated code
        wrapper.add_import_once(f"import {self.grid_fn.__module__}")
        meta = wrapper.add_meta_once(self.meta)

        grid_call = [
            texpr(V.graph.sizevars.simplify(s)) for s in self.call_sizes
        ] + [meta]
        grid_call = f"{self.grid_fn.__module__}.{self.grid_fn.__name__}({', '.join(grid_call)})"
        wrapper.writeline(
            f"{name}.run({call_args}, grid={grid_call}, stream={stream_name})"
        )
|
| 373 |
+
|
| 374 |
+
|
| 375 |
+
@functools.lru_cache(None)
|
| 376 |
+
def _jinja2_env():
|
| 377 |
+
try:
|
| 378 |
+
import jinja2
|
| 379 |
+
|
| 380 |
+
return jinja2.Environment(
|
| 381 |
+
undefined=jinja2.StrictUndefined,
|
| 382 |
+
)
|
| 383 |
+
except ImportError:
|
| 384 |
+
return None
|
| 385 |
+
|
| 386 |
+
|
| 387 |
+
class TritonTemplate:
    """A jinja2-based triton kernel template.

    Instances are registered by name in ``all_templates`` and produce
    ``TritonTemplateCaller`` autotuning choices via ``generate()``.
    """

    # monotonically increasing suffix so generated kernel hashes are unique
    index_counter = itertools.count()
    # global registry; template names must be unique
    all_templates: Dict[str, "TritonTemplate"] = dict()

    @staticmethod
    def _template_from_string(source):
        # returns None when jinja2 is unavailable (see _jinja2_env)
        env = _jinja2_env()
        if env is not None:
            return env.from_string(source)
        return None

    def __init__(self, name: str, grid: Any, source: str, debug=False):
        """
        Args:
            name: unique template name, used in generated kernel names
            grid: callable mapping (*size_hints, meta) -> triton launch grid
            source: jinja2 source text of the kernel
            debug: when True, print each generated kernel
        """
        super().__init__()
        self.name = name
        self.grid = grid
        self.template = self._template_from_string(source)
        assert name not in self.all_templates, "duplicate template name"
        self.all_templates[name] = self
        self.debug = debug

    def maybe_append_choice(
        self,
        choices,
        input_nodes,
        layout,
        num_stages,
        num_warps,
        prefix_args=0,
        suffix_args=0,
        epilogue_fn=identity,
        **kwargs,
    ):
        """Generate a choice for this config and append it to *choices*;
        silently skip configs this template cannot handle
        (NotImplementedError from generate)."""
        try:
            choices.append(
                self.generate(
                    input_nodes=input_nodes,
                    layout=layout,
                    num_stages=num_stages,
                    num_warps=num_warps,
                    prefix_args=prefix_args,
                    suffix_args=suffix_args,
                    epilogue_fn=epilogue_fn,
                    **kwargs,
                )
            )
        except NotImplementedError:
            pass

    def generate(
        self,
        input_nodes,
        layout,
        num_stages,
        num_warps,
        prefix_args=0,
        suffix_args=0,
        epilogue_fn=identity,
        **kwargs,
    ):
        """Render, compile-load, and wrap this template for one config.

        Returns a TritonTemplateCaller (or None on a sympy
        ZeroDivisionError during rendering).  Raises NotImplementedError
        when the problem needs 64-bit indexing.
        """
        assert self.template, "requires jinja2"
        # each kwarg becomes a tl.constexpr define inside the kernel
        defines = StringIO()
        for name, val in kwargs.items():
            defines.write(f" {name} : tl.constexpr = {val}\n")
        defines = defines.getvalue()

        # placeholder output buffer used only while rendering
        fake_out = ir.Buffer("buf_out", layout)
        kernel_name = f"triton_{self.name}"

        numel = sympy_product(layout.size)
        buffers = itertools.chain(input_nodes, (fake_out,))
        if not TritonScheduling.can_use_32bit_indexing(numel, buffers):
            raise NotImplementedError(
                "64-bit indexing is not yet implemented for triton templates"
            )

        kernel_options = dict(
            input_nodes=input_nodes,
            defines=defines,
            num_stages=num_stages,
            num_warps=num_warps,
            grid_fn=self.grid,
            meta=kwargs,
            call_sizes=layout.size,
            prefix_args=prefix_args,
            suffix_args=suffix_args,
            epilogue_fn=epilogue_fn,
            index_dtype="tl.int32",
        )
        # get_dtype must resolve the fake output buffer while rendering
        with patch.object(
            V.graph, "get_dtype", self.fake_get_dtype(fake_out)
        ), TritonTemplateKernel(
            kernel_name=kernel_name,
            output_node=fake_out,
            use_jit=True,
            **kernel_options,
        ) as kernel:
            try:
                code = kernel.render(self.template, kwargs).finalize()
            except ZeroDivisionError:
                # TODO(nmacchioni): fix sympy division by zero
                return None
            if self.debug:
                print("Generated Code:\n", code)
            # human-readable config string, also salts the code cache key
            extra = (
                "-".join(
                    [
                        *[
                            f"{kwarg}={repr(kwargs[kwarg])}"
                            for kwarg in sorted(kwargs.keys())
                        ],
                        f"num_stages={num_stages}",
                        f"num_warps={num_warps}",
                    ]
                )
                + "-"
            )
            mod = PyCodeCache.load(code, extra)
            _, call_args, _ = kernel.args.python_argdefs()

        # sanity check: generated arg order must be inputs then output
        expected_args = list(unique(x.get_name() for x in input_nodes))
        expected_args.extend([fake_out.get_name()])
        assert list(call_args)[: len(expected_args)] == expected_args, (
            call_args,
            expected_args,
        )
        # remaining args are dynamic sizes; benchmark with their hints
        extra_args = V.graph.sizevars.size_hints(
            map(sympy.expand, call_args[len(expected_args) :])
        )

        kernel_hash_name = f"triton_{self.name}_{next(self.index_counter)}"

        def make_kernel_render(out_node):
            # re-render later against the real (scheduled) output node
            kernel = TritonTemplateKernel(
                kernel_name="KERNEL_NAME",
                output_node=out_node,
                use_jit=False,
                **kernel_options,
            )
            render = functools.partial(
                kernel.render,
                self.template,
                kwargs,
            )
            return kernel, render

        # create the BenchmarkRequest
        grid = self.grid(*V.graph.sizevars.size_hints(layout.size), kwargs)
        bmreq = BenchmarkRequest(
            module_path=mod.__file__,
            module_cache_key=mod.key,
            kernel_name=kernel_name,
            grid=grid,
            extra_args=extra_args,
            num_stages=num_stages,
            num_warps=num_warps,
            input_tensors=TensorMeta.from_irnodes(input_nodes),
            output_tensor=TensorMeta.from_irnodes(layout),
        )

        return TritonTemplateCaller(
            kernel_hash_name,
            input_nodes,
            layout,
            make_kernel_render,
            extra.strip("-").replace("-", ", "),
            bmreq,
        )

    @staticmethod
    def fake_get_dtype(fake_out):
        """Return a get_dtype that answers for the fake output buffer and
        otherwise defers to the real V.graph.get_dtype (captured now)."""
        _get_dtype_real = V.graph.get_dtype

        def get_dtype(name):
            if name == fake_out.get_name():
                return fake_out.get_dtype()
            return _get_dtype_real(name)

        return get_dtype
|
| 565 |
+
|
| 566 |
+
|
| 567 |
+
class ExternKernelChoice:
    """Registers a python callable under ``extern_kernels.<name>`` so it
    can compete with triton templates during autotuning."""

    def __init__(
        self,
        kernel,
        cpp_kernel=None,
        *,
        name=None,
        has_out_variant=True,
    ):
        """
        Args:
            kernel: the callable to register (e.g. an ATen op)
            cpp_kernel: optional C++ kernel name for cpp-wrapper codegen
            name: registry name; defaults to kernel.__name__
            has_out_variant: whether the kernel accepts an out= tensor
        """
        super().__init__()
        name = name or kernel.__name__
        assert callable(kernel)
        assert not hasattr(extern_kernels, name), "duplicate extern kernel"
        self.name = name
        self.cpp_kernel = cpp_kernel
        self.has_out_variant = has_out_variant
        # registration makes the kernel importable from generated code
        setattr(extern_kernels, name, kernel)

    def to_callable(self):
        return getattr(extern_kernels, self.name)

    def call_name(self):
        # name used in generated wrapper code
        return f"extern_kernels.{self.name}"

    # NOTE(review): lru_cache on an instance method keys on `self` and
    # keeps every instance alive for the process lifetime (ruff B019);
    # acceptable here only because these objects are few and global.
    @functools.lru_cache(None)
    def hash_key(self):
        """Stable key for the autotuning cache: name + source of the fn."""
        fn = self.to_callable()
        parts = [
            self.name,
            getattr(fn, "__name__", ""),
            getattr(fn, "__module__", ""),
        ]
        try:
            parts.append(inspect.getsource(fn))
        except Exception:
            # builtins/C extensions have no retrievable source
            pass
        return code_hash("-".join(parts))

    def bind(self, input_nodes, layout, ordered_kwargs_for_cpp_kernel=(), **kwargs):
        """Create an ExternKernelCaller choice for these inputs.

        NOTE(review): this mutates self (last bind() wins for
        ordered_kwargs_for_cpp_kernel) — confirm callers never interleave
        binds with different kwarg orders.
        """
        self.ordered_kwargs_for_cpp_kernel = ordered_kwargs_for_cpp_kernel
        return ExternKernelCaller(
            self, input_nodes, layout, kwargs, has_out_variant=self.has_out_variant
        )
|
| 610 |
+
|
| 611 |
+
|
| 612 |
+
class ChoiceCaller:
    """Abstract base for one autotuning candidate (a triton template or
    an extern kernel) bound to concrete inputs and an output layout."""

    def __init__(self, name, input_nodes, layout):
        super().__init__()
        self.name = name
        self.layout = layout
        self.input_nodes = input_nodes

    def benchmark(self, *args, out):
        """Time one run of this candidate writing into *out*."""
        fn = self.to_callable()
        return do_bench(lambda: fn(*args, out=out))

    def call_name(self):
        """Name used to reference this kernel in generated code."""
        raise NotImplementedError()

    def to_callable(self):
        """Return a python callable implementing this candidate."""
        raise NotImplementedError()

    def hash_key(self):
        """Stable key for the persistent autotuning cache."""
        raise NotImplementedError()

    def output_node(self):
        """Build the IR node realizing this candidate's output."""
        raise NotImplementedError()
|
| 634 |
+
|
| 635 |
+
|
| 636 |
+
class TritonTemplateCaller(ChoiceCaller):
    """Autotuning candidate backed by a generated triton template kernel."""

    def __init__(
        self, name, input_nodes, layout, make_kernel_render, debug_extra, bmreq
    ):
        super().__init__(name, input_nodes, layout)
        self.bmreq = bmreq
        self.debug_extra = debug_extra
        self.make_kernel_render = make_kernel_render

    def benchmark(self, *args, out):
        # timing is delegated to the pre-built BenchmarkRequest
        assert self.bmreq is not None
        return self.bmreq.benchmark(*args, output_tensor=out)

    def __str__(self):
        return f"TritonTemplateCaller({self.bmreq.module_path}, {self.debug_extra})"

    def call_name(self):
        return f"template_kernels.{self.name}"

    def hash_key(self):
        # drop the trailing uniquifying counter so equal configs share a key
        base = self.name.rsplit("_", 1)[0]
        return "-".join([base, self.bmreq.module_cache_key])

    def output_node(self):
        template_buffer = ir.TemplateBuffer(
            layout=self.layout,
            inputs=self.input_nodes,
            make_kernel_render=self.make_kernel_render,
        )
        return ir.TensorBox.create(template_buffer)
|
| 671 |
+
|
| 672 |
+
|
| 673 |
+
class ExternKernelCaller(ChoiceCaller):
    """Autotuning candidate backed by an ATen/extern kernel
    (an ExternKernelChoice bound to concrete inputs)."""

    def __init__(
        self,
        choice: ExternKernelChoice,
        input_nodes,
        layout,
        kwargs=None,
        *,
        has_out_variant=True,
    ):
        super().__init__(choice.name, input_nodes, layout)
        self.choice = choice
        self.kwargs = kwargs or {}
        self.has_out_variant = has_out_variant

    def __str__(self):
        return f"ExternKernelCaller({self.choice.call_name()})"

    def benchmark(self, *args, out):
        """Time the kernel.  Without an out= variant, run the allocating
        form and copy into *out* so correctness checking still works."""
        if self.has_out_variant:
            return super().benchmark(*args, out=out)
        else:
            algo = self.to_callable()
            out_new = algo(*args)
            # result must match the expected layout exactly
            torch._C._dynamo.guards.assert_size_stride(  # type: ignore[attr-defined]
                out_new, tuple(out.size()), tuple(out.stride())
            )
            out.copy_(out_new)  # for correctness checking
            return do_bench(lambda: algo(*args))

    def to_callable(self):
        # bake the bound kwargs into the callable
        fn = self.choice.to_callable()
        if self.kwargs:
            return functools.partial(fn, **self.kwargs)
        else:
            return fn

    def hash_key(self):
        # kernel identity plus every bound kwarg, deterministically ordered
        return "-".join(
            [
                self.choice.name,
                *[
                    f"{kwarg}={repr(self.kwargs[kwarg])}"
                    for kwarg in sorted(self.kwargs.keys())
                ],
                self.choice.hash_key(),
            ]
        )

    def output_node(self):
        """Build the extern-kernel IR node (out-variant or allocating)."""
        cls: Union[Type[ir.ExternKernelOut], Type[ir.ExternKernelAlloc]]
        if self.has_out_variant:
            cls = ir.ExternKernelOut
        else:
            cls = ir.ExternKernelAlloc
        return ir.TensorBox.create(
            cls(
                layout=self.layout,
                inputs=self.input_nodes,
                kernel=self.choice.call_name(),
                cpp_kernel=self.choice.cpp_kernel,
                ordered_kwargs_for_cpp_kernel=self.choice.ordered_kwargs_for_cpp_kernel,
                kwargs=self.kwargs,
            )
        )
|
| 738 |
+
|
| 739 |
+
|
| 740 |
+
class ErrorFromChoice(RuntimeError):
    """RuntimeError annotated with the autotune candidate and the input
    description that produced it."""

    def __init__(self, msg, choice: ChoiceCaller, inputs_str):
        super().__init__(f"{msg}\nFrom choice {choice}\n{inputs_str}")
        self.choice = choice
|
| 745 |
+
|
| 746 |
+
|
| 747 |
+
class AlgorithmSelectorCache(PersistentCache):
    """Benchmarks a list of ChoiceCallers and returns the output node of
    the fastest, with results persisted via PersistentCache."""

    def __call__(self, name, choices: List[ChoiceCaller], input_nodes, layout):
        # TODO(nmacchioni): remove once CI tests are fixed
        choices = [choice for choice in choices if choice is not None]
        if len(choices) == 0:
            raise RuntimeError(
                "No choices to select, please consider adding ATEN into max_autotune_gemm_backends "
                "config (defined in torch/_inductor/config.py) to allow at least one choice. "
            )

        if len(choices) == 1:
            # nothing to tune
            return choices[0].output_node()

        # lazily build the (expensive) benchmark fixture at most once
        @functools.lru_cache(None)
        def make_benchmark_fn():
            return self.make_benchmark_fn(choices, input_nodes, layout)

        def autotune(choice):
            """Benchmark one choice; inf for too-small GPUs, raise for
            real template/driver errors."""
            benchmark_fn = make_benchmark_fn()
            try:
                timing = benchmark_fn(
                    choice,
                )
            except RuntimeError as e:
                msg = str(e)
                if "invalid argument" in msg:
                    msg += "\n\nThis may mean this GPU is too small for max_autotune mode.\n\n"
                    log.warning(msg)
                    return float("inf")
                elif "illegal memory access" in msg:
                    msg += "\n\nEither error in template or triton bug.\n"
                raise ErrorFromChoice(msg, choice, benchmark_fn.debug_str())
            except AssertionError as e:
                # raised by VERIFY result comparison
                raise AssertionError(f"Incorrect result from choice {choice}\n\n{e}")
            return timing

        if config.autotune_in_subproc:
            from .autotune_process import tuning_process

            # do the optional warmup
            tuning_process.initialize()
            assert tuning_process.valid()

        autotune_start_ts = time.time()
        # PersistentCache.lookup calls `autotune` only on cache misses
        timings = self.lookup(
            choices,
            name,
            repr([self.key_of(x) for x in input_nodes]),
            autotune,
        )
        autotune_elapse = time.time() - autotune_start_ts
        if timings == {} or choices[0] not in timings:
            # benchmarking failed/skipped: fall back to the first choice
            return choices[0].output_node()

        # only log when we actually benchmarked (cache miss happened)
        if make_benchmark_fn.cache_info().currsize:
            counters["inductor"]["select_algorithm_autotune"] += 1
            self.log_results(name, input_nodes, timings, autotune_elapse)
        return builtins.min(timings, key=timings.__getitem__).output_node()

    @classmethod
    def make_benchmark_fn(
        cls,
        choices,
        input_nodes,
        layout,
    ):
        """Build a closure that benchmarks a single choice against shared
        example tensors (allocated once for all choices)."""
        # de-duplicate args
        unique_example_inputs = {
            x.get_name(): cls.benchmark_example_value(x) for x in input_nodes
        }
        example_inputs = list(unique_example_inputs.values())
        # extern kernels get views with the size/stride/offset baked in
        example_inputs_extern = [
            torch.as_strided(
                unique_example_inputs[input_node.get_name()],
                V.graph.sizevars.size_hints(input_node.get_size()),
                V.graph.sizevars.size_hints(input_node.get_stride()),
                V.graph.sizevars.size_hint(input_node.get_layout().offset),
            )
            for input_node in input_nodes
        ]

        out = cls.benchmark_example_value(layout)
        out_extern = torch.as_strided(
            out, out.size(), out.stride(), V.graph.sizevars.size_hint(layout.offset)
        )
        if VERIFY:
            # choice 0 (usually ATen) provides the reference result
            choices[0].benchmark(*example_inputs_extern, out=out_extern)
            expected = out_extern.clone()

        if DEBUG:
            print(f"{len(choices)} tuning requests:")

        def benchmark_in_current_process(choice):
            if DEBUG:
                # NOTE(review): start_ts is assigned but never read here —
                # looks like a dropped DEBUG elapsed-time print; confirm.
                start_ts = time.time()
            out.zero_()
            if isinstance(choice, ExternKernelCaller):
                # aten kernels want the offset baked in for sliced tensors
                result = choice.benchmark(*example_inputs_extern, out=out_extern)
            else:
                # triton templates want the base pointer for sliced tensors
                result = choice.benchmark(*example_inputs, out=out)
            if VERIFY:
                torch.testing.assert_close(out_extern, expected, **VERIFY)
            torch.cuda.synchronize()  # shake out any CUDA errors
            return result

        def benchmark_in_sub_process(choice):
            # only benchmark triton kernel in sub process for now.
            # ATen/Extern kernel are still benchmarked in the current process.
            if isinstance(choice, ExternKernelCaller):
                return benchmark_in_current_process(choice)

            from . import autotune_process

            if DEBUG:
                start_ts = time.time()

            out = autotune_process.benchmark_in_sub_process(
                choice,
            )
            if DEBUG:
                elapse = time.time() - start_ts
                print(f"MultiProcessTuning {choice}: {elapse}")
            return out

        benchmark = (
            benchmark_in_sub_process
            if config.autotune_in_subproc
            else benchmark_in_current_process
        )

        def debug_str():
            """Reproducer text: how to allocate the inputs/output."""
            def tensor_repr(x):
                return (
                    f"torch.empty_strided({tuple(x.size())!r}, {tuple(x.stride())!r}, "
                    f"dtype={x.dtype!r}, device={x.device.type!r})"
                )

            lines = [
                "inputs = [",
            ]
            for x in example_inputs:
                lines.append(f" {tensor_repr(x)},")
            lines += ["]", f"out = {tensor_repr(out)}", ""]
            return "\n".join(lines)

        benchmark.debug_str = debug_str  # type: ignore[attr-defined]
        return benchmark

    @staticmethod
    def log_results(name, input_nodes, timings, elapse):
        """Print the top-10 timings to stderr when autotune logging is on."""
        if not (config.max_autotune or config.max_autotune_gemm) or not PRINT_AUTOTUNE:
            return
        sizes = ", ".join(
            [
                "x".join(map(str, V.graph.sizevars.size_hints(n.get_size())))
                for n in input_nodes
            ]
        )
        top_k = sorted(timings, key=timings.__getitem__)[:10]
        best = top_k[0]
        best_time = timings[best]
        sys.stderr.write(f"AUTOTUNE {name}({sizes})\n")
        for choice in top_k:
            result = timings[choice]
            sys.stderr.write(
                f" {choice.name} {result:.4f} ms {best_time/result:.1%}\n"
            )

        autotune_type_str = (
            "SubProcess" if config.autotune_in_subproc else "SingleProcess"
        )
        sys.stderr.write(f"{autotune_type_str} AUTOTUNE takes {elapse:.4f} seconds\n")

    @staticmethod
    def benchmark_example_value(node):
        """
        Convert an ir.Buffer into a concrete torch.Tensor we can use for
        benchmarking.
        """
        if isinstance(node, ir.Layout):
            node = ir.Buffer("fake", node)
        # triton templates want the base tensor.
        if isinstance(node, ir.BaseView):
            node = node.unwrap_view()
        return rand_strided(
            V.graph.sizevars.size_hints(node.get_size()),
            V.graph.sizevars.size_hints(node.get_stride()),
            device=node.get_device(),
            dtype=node.get_dtype(),
            extra_size=node.layout.offset,
        )

    @staticmethod
    def key_of(node):
        """
        Extract the pieces of an ir.Buffer that we should invalidate cached
        autotuning results on.
        """
        sizevars = V.graph.sizevars
        return (
            node.get_device().type,
            str(node.get_dtype()),
            *sizevars.size_hints(node.get_size()),
            *sizevars.size_hints(node.get_stride()),
            sizevars.size_hint(node.get_layout().offset),
        )
|
| 955 |
+
|
| 956 |
+
|
| 957 |
+
_ALGORITHM_SELECTOR_CACHE = None


def autotune_select_algorithm(*args, **kwargs):
    """Run algorithm selection through a lazily created, process-wide
    AlgorithmSelectorCache instance."""
    global _ALGORITHM_SELECTOR_CACHE
    cache = _ALGORITHM_SELECTOR_CACHE
    if cache is None:
        cache = AlgorithmSelectorCache()
        _ALGORITHM_SELECTOR_CACHE = cache
    return cache(*args, **kwargs)
|
| 965 |
+
|
| 966 |
+
|
| 967 |
+
def realize_inputs(*args):
    """Realize each argument as a stride-1 kernel input.

    A single argument returns a single realized node; multiple arguments
    return a list (recursing per argument).
    """
    if len(args) != 1:
        return [realize_inputs(arg) for arg in args]
    (node,) = args
    return ir.ExternKernel.require_stride1(ir.ExternKernel.realize_input(node))
|
| 971 |
+
|
| 972 |
+
|
| 973 |
+
# ensure lowering is imported so that `extern_kernels.*` is populated
|
| 974 |
+
from . import lowering # noqa: F401
|
llava_next/lib/python3.10/site-packages/torch/_inductor/sizevars.py
ADDED
|
@@ -0,0 +1,576 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import functools
|
| 2 |
+
import itertools
|
| 3 |
+
import logging
|
| 4 |
+
from typing import Callable, Dict, List, Tuple, Union
|
| 5 |
+
|
| 6 |
+
import sympy
|
| 7 |
+
from sympy import Expr
|
| 8 |
+
|
| 9 |
+
from torch.fx.experimental.symbolic_shapes import ShapeEnv
|
| 10 |
+
from torch.utils._sympy.functions import FloorDiv, ModularIndexing
|
| 11 |
+
|
| 12 |
+
from .utils import sympy_subs, sympy_symbol, VarRanges
|
| 13 |
+
from .virtualized import V
|
| 14 |
+
|
| 15 |
+
log = logging.getLogger(__name__)
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
# This class is a little awkward, because ShapeEnv is doing most of the heavy
|
| 19 |
+
# lifting and in some cases we should be directly passing through to ShapeEnv,
|
| 20 |
+
# but there is some extra inductor logic that needs to be handled here
|
| 21 |
+
class SizeVarAllocator:
|
| 22 |
+
def __init__(self, shape_env=None):
|
| 23 |
+
super().__init__()
|
| 24 |
+
if shape_env is None:
|
| 25 |
+
shape_env = ShapeEnv()
|
| 26 |
+
self.shape_env = shape_env
|
| 27 |
+
self.var_to_val = self.shape_env.var_to_val
|
| 28 |
+
self.replacements: Dict[sympy.Symbol, Expr] = self.shape_env.replacements
|
| 29 |
+
# Maps of dynamic sizes that have to be precomputed on the host to the kernel args.
|
| 30 |
+
# The basic idea is if we have some complicated sympy expression
|
| 31 |
+
# f(s0), we may choose to precompute it on the host and then replace
|
| 32 |
+
# all occurrences of that sympy expression with ps0, so that when we
|
| 33 |
+
# codegen we simply reference ps0 directly without repeating
|
| 34 |
+
# f(s0). Unlike regular size variables, ps variables cannot be
|
| 35 |
+
# guarded upon; so if we are asked to guard on a Sympy expression
|
| 36 |
+
# which potentially could have already had a precomputed replacement
|
| 37 |
+
# on it, we are obligated to invert the precomputed replacements
|
| 38 |
+
# (inv_precomputed_replacements).
|
| 39 |
+
self.precomputed_replacements: Dict[Expr, sympy.Symbol] = dict()
|
| 40 |
+
self.inv_precomputed_replacements: Dict[sympy.Symbol, Expr] = dict()
|
| 41 |
+
self.stride_vars = self.make_stride_vars_cache()
|
| 42 |
+
self.simplify_with_ranges = self.make_simplify_with_ranges_cache()
|
| 43 |
+
self._simplify_loops = self.make_simplify_loops_cache()
|
| 44 |
+
|
| 45 |
+
def simplify(self, expr: Expr):
|
| 46 |
+
return sympy.expand(expr).xreplace(self.replacements)
|
| 47 |
+
|
| 48 |
+
def make_simplify_with_ranges_cache(self):
|
| 49 |
+
"""
|
| 50 |
+
self._simplify_with_ranges() can be expensive, cache its results
|
| 51 |
+
"""
|
| 52 |
+
cache = dict()
|
| 53 |
+
replacement_count = len(self.replacements)
|
| 54 |
+
|
| 55 |
+
def simplify_with_ranges(expr: Expr, var_ranges: VarRanges):
|
| 56 |
+
nonlocal replacement_count
|
| 57 |
+
if replacement_count != len(self.replacements):
|
| 58 |
+
# new replacements invalidates cached results
|
| 59 |
+
cache.clear()
|
| 60 |
+
replacement_count = len(self.replacements)
|
| 61 |
+
key = (expr, *var_ranges.items())
|
| 62 |
+
result = cache.get(key, None)
|
| 63 |
+
if result is None:
|
| 64 |
+
result = self._simplify_with_ranges(expr, var_ranges)
|
| 65 |
+
cache[key] = result
|
| 66 |
+
return result
|
| 67 |
+
|
| 68 |
+
return simplify_with_ranges
|
| 69 |
+
|
| 70 |
+
def make_simplify_loops_cache(self):
|
| 71 |
+
"""
|
| 72 |
+
self._simplify_with_ranges() can be expensive, cache its results
|
| 73 |
+
"""
|
| 74 |
+
cache = dict()
|
| 75 |
+
replacement_count = len(self.replacements)
|
| 76 |
+
|
| 77 |
+
def simplify_loops(index_vars, sizes, index_formulas):
|
| 78 |
+
nonlocal replacement_count
|
| 79 |
+
if replacement_count != len(self.replacements):
|
| 80 |
+
# new replacements invalidates cached results
|
| 81 |
+
cache.clear()
|
| 82 |
+
replacement_count = len(self.replacements)
|
| 83 |
+
key = (*index_vars, *sizes, *index_formulas)
|
| 84 |
+
result = cache.get(key, None)
|
| 85 |
+
if result is None:
|
| 86 |
+
result = self._simplify_loops_impl(index_vars, sizes, index_formulas)
|
| 87 |
+
cache[key] = result
|
| 88 |
+
return result
|
| 89 |
+
|
| 90 |
+
return simplify_loops
|
| 91 |
+
|
| 92 |
+
def _simplify_with_ranges(self, expr: Expr, var_ranges: VarRanges):
|
| 93 |
+
"""
|
| 94 |
+
Simplify indexing expression with knowledge of the ranges of
|
| 95 |
+
iteration variables.
|
| 96 |
+
"""
|
| 97 |
+
|
| 98 |
+
expr = join_dimensions(self.simplify(expr))
|
| 99 |
+
original_expr = expr
|
| 100 |
+
|
| 101 |
+
def remove_zero_terms(base, divisor):
|
| 102 |
+
"""Symbols smaller than the divisor are zero"""
|
| 103 |
+
for v in base.free_symbols:
|
| 104 |
+
if v in var_ranges:
|
| 105 |
+
# var smaller than divisor can be removed
|
| 106 |
+
# if the rest is guaranteed to be multiple of divisor
|
| 107 |
+
rest = sympy.Wild("_rest", exclude=[v])
|
| 108 |
+
m = base.match(v + rest)
|
| 109 |
+
if m and v not in m[rest].free_symbols:
|
| 110 |
+
gcd = sympy.gcd(m[rest], divisor)
|
| 111 |
+
if gcd == divisor:
|
| 112 |
+
if self.statically_known_leq(var_ranges[v], divisor):
|
| 113 |
+
base = m[rest]
|
| 114 |
+
return base
|
| 115 |
+
|
| 116 |
+
def visit_indexing_div(base, divisor):
|
| 117 |
+
return FloorDiv(remove_zero_terms(base, divisor), divisor)
|
| 118 |
+
|
| 119 |
+
def visit_modular_indexing(base, divisor, modulus):
|
| 120 |
+
base = remove_zero_terms(base, divisor)
|
| 121 |
+
base_pos = True
|
| 122 |
+
if isinstance(base, ModularIndexing):
|
| 123 |
+
# for modular indexing, biggest values from the ranges don't necessarily result in
|
| 124 |
+
# the biggest result, the biggest result is modulus - 1
|
| 125 |
+
base_s = base.args[2] - 1
|
| 126 |
+
elif not base.has(ModularIndexing):
|
| 127 |
+
# actual iteration range is to size-1
|
| 128 |
+
iter_ranges_zero = {k: 0 for k, v in var_ranges.items()}
|
| 129 |
+
base_lowest = sympy_subs(base, iter_ranges_zero)
|
| 130 |
+
if self.statically_known_leq(0, base_lowest):
|
| 131 |
+
# can't replace with indexing div if base can be negative
|
| 132 |
+
base_pos = True
|
| 133 |
+
else:
|
| 134 |
+
base_pos = False
|
| 135 |
+
iter_ranges = {k: v - 1 for k, v in var_ranges.items()}
|
| 136 |
+
base_s = sympy_subs(base, iter_ranges)
|
| 137 |
+
else:
|
| 138 |
+
base_s = base
|
| 139 |
+
if self.statically_known_lt(base_s, modulus * divisor) and base_pos:
|
| 140 |
+
return FloorDiv(base, divisor)
|
| 141 |
+
return ModularIndexing(base, divisor, modulus)
|
| 142 |
+
|
| 143 |
+
if expr.has(ModularIndexing):
|
| 144 |
+
expr = expr.replace(
|
| 145 |
+
ModularIndexing(
|
| 146 |
+
sympy.Wild("base"),
|
| 147 |
+
sympy.Wild("divisor"),
|
| 148 |
+
sympy.Wild("modulus"),
|
| 149 |
+
),
|
| 150 |
+
visit_modular_indexing,
|
| 151 |
+
)
|
| 152 |
+
|
| 153 |
+
if expr.has(FloorDiv):
|
| 154 |
+
expr = expr.replace(
|
| 155 |
+
FloorDiv(
|
| 156 |
+
sympy.Wild("base"),
|
| 157 |
+
sympy.Wild("divisor"),
|
| 158 |
+
),
|
| 159 |
+
visit_indexing_div,
|
| 160 |
+
)
|
| 161 |
+
|
| 162 |
+
if expr != original_expr:
|
| 163 |
+
return self._simplify_with_ranges(expr, var_ranges)
|
| 164 |
+
return expr
|
| 165 |
+
|
| 166 |
+
def _simplify_loops_impl(self, index_vars, sizes, index_formulas):
|
| 167 |
+
"""
|
| 168 |
+
Try to remove as many axis from loop iterations as possible, by:
|
| 169 |
+
1) removing size==1 dimensions
|
| 170 |
+
2) fuse contiguous dimensions into a single loop
|
| 171 |
+
If channel_last = True, we will prevent the last dim fused with other dims
|
| 172 |
+
"""
|
| 173 |
+
sizes = list(map(self.simplify, sizes))
|
| 174 |
+
|
| 175 |
+
strides = [self.stride_vars(x, index_vars) for x in index_formulas]
|
| 176 |
+
assert len(sizes) == len(strides[0]), (len(sizes), len(strides[0]))
|
| 177 |
+
|
| 178 |
+
for i in range(len(sizes)):
|
| 179 |
+
if sizes[i] == 1:
|
| 180 |
+
# remove dim
|
| 181 |
+
sizes[i] = None
|
| 182 |
+
|
| 183 |
+
def can_merge_dims(a, b):
|
| 184 |
+
for k in range(len(strides)):
|
| 185 |
+
if self.simplify(strides[k][a] * sizes[a]) == self.simplify(
|
| 186 |
+
strides[k][b]
|
| 187 |
+
):
|
| 188 |
+
# approximate test passed, try sound version
|
| 189 |
+
va = index_vars[a]
|
| 190 |
+
vb = index_vars[b]
|
| 191 |
+
v = sympy_symbol("_merge_tester")
|
| 192 |
+
expr1 = sympy_subs(index_formulas[k], {va: v * sizes[a], vb: 0})
|
| 193 |
+
expr2 = sympy_subs(index_formulas[k], {va: 0, vb: v})
|
| 194 |
+
if self.simplify(expr1) == self.simplify(expr2):
|
| 195 |
+
continue
|
| 196 |
+
return False
|
| 197 |
+
return True
|
| 198 |
+
|
| 199 |
+
changed = True
|
| 200 |
+
while changed:
|
| 201 |
+
changed = False
|
| 202 |
+
for i, j in itertools.product(
|
| 203 |
+
reversed(range(len(sizes))), reversed(range(len(sizes)))
|
| 204 |
+
):
|
| 205 |
+
if i == j or sizes[i] is None or sizes[j] is None:
|
| 206 |
+
continue
|
| 207 |
+
if can_merge_dims(i, j):
|
| 208 |
+
changed = True
|
| 209 |
+
sizes[i] = sizes[i] * sizes[j]
|
| 210 |
+
sizes[j] = None
|
| 211 |
+
|
| 212 |
+
def reindex(index):
|
| 213 |
+
it = list(reversed(index))
|
| 214 |
+
new_index = []
|
| 215 |
+
for size in sizes:
|
| 216 |
+
if size is None:
|
| 217 |
+
new_index.append(sympy.Integer(0))
|
| 218 |
+
else:
|
| 219 |
+
new_index.append(it.pop())
|
| 220 |
+
assert not it
|
| 221 |
+
return new_index
|
| 222 |
+
|
| 223 |
+
def prune(index):
|
| 224 |
+
assert len(index) == len(sizes)
|
| 225 |
+
return [i for i, s in zip(index, sizes) if s is not None]
|
| 226 |
+
|
| 227 |
+
return [x for x in sizes if x is not None], reindex, prune
|
| 228 |
+
|
| 229 |
+
# Note - [On Statically Known]
|
| 230 |
+
#
|
| 231 |
+
# The statically_known_* family of functions below replaces a prior system, called maybe_guard_*. The prior system
|
| 232 |
+
# operated by providing esentially a question, where the size hinted values were evaluted. If the condition was
|
| 233 |
+
# true, we add a guard and return True, otherwise, False.
|
| 234 |
+
#
|
| 235 |
+
# def maybe_guard_foo(args):
|
| 236 |
+
# if size_hinted_check(args):
|
| 237 |
+
# return False # No guard, no optim
|
| 238 |
+
# guard(args) # Make a guard
|
| 239 |
+
# return True # Safe to apply optimization
|
| 240 |
+
#
|
| 241 |
+
# The prior system incurred a guard, and green lit an optimization.
|
| 242 |
+
#
|
| 243 |
+
# The new system works in reverse - in the new system, if we know that the inputs are static, and evaluate the
|
| 244 |
+
# condition as true, we green light the optimization, and we do not incur a guard. If we cannot prove that, we
|
| 245 |
+
# return False.
|
| 246 |
+
#
|
| 247 |
+
# def maybe_guard_foo(args):
|
| 248 |
+
# if all_static(args):
|
| 249 |
+
# return True # Safe to apply optimization
|
| 250 |
+
# else:
|
| 251 |
+
# return False # No guard, no optim
|
| 252 |
+
|
| 253 |
+
# See Note - [On Statically Known]
|
| 254 |
+
|
| 255 |
+
def is_expr_static_and_true(self, expr: Union[Expr, int]) -> bool:
|
| 256 |
+
if expr in (True, False):
|
| 257 |
+
return expr
|
| 258 |
+
|
| 259 |
+
try:
|
| 260 |
+
simplified = self.shape_env._maybe_evaluate_static(expr)
|
| 261 |
+
if simplified is not None:
|
| 262 |
+
return bool(simplified)
|
| 263 |
+
except Exception:
|
| 264 |
+
log.debug("Could not simplify %s", expr)
|
| 265 |
+
|
| 266 |
+
return False
|
| 267 |
+
|
| 268 |
+
def statically_known_equals(self, left: Expr, right: Expr) -> bool:
|
| 269 |
+
"""
|
| 270 |
+
Returns a bool indicating if it is sound to optimize as if left and right are equal.
|
| 271 |
+
"""
|
| 272 |
+
return self.is_expr_static_and_true(sympy.Eq(left, right))
|
| 273 |
+
|
| 274 |
+
# See Note - [On Statically Known]
|
| 275 |
+
def statically_known_list_equals(self, left: List[Expr], right: List[Expr]) -> bool:
|
| 276 |
+
"""
|
| 277 |
+
Returns a bool indicating if it is sound to optimize as if left and right lists are equal.
|
| 278 |
+
"""
|
| 279 |
+
if len(left) != len(right):
|
| 280 |
+
return False
|
| 281 |
+
if all(self.statically_known_equals(l, r) for l, r in zip(left, right)):
|
| 282 |
+
return True
|
| 283 |
+
return False
|
| 284 |
+
|
| 285 |
+
# See Note - [On Statically Known]
|
| 286 |
+
def statically_known_leq(self, left: Expr, right: Expr) -> bool:
|
| 287 |
+
"""
|
| 288 |
+
Returns a bool indicating if it is sound to optimize as if left is less than or equal to right.
|
| 289 |
+
"""
|
| 290 |
+
expr = left <= right
|
| 291 |
+
return self.is_expr_static_and_true(expr)
|
| 292 |
+
|
| 293 |
+
# See Note - [On Statically Known]
|
| 294 |
+
def statically_known_lt(self, left: Expr, right: Expr) -> bool:
|
| 295 |
+
"""
|
| 296 |
+
Returns a bool indicating if it is sound to optimize as if left is less than right.
|
| 297 |
+
"""
|
| 298 |
+
expr = left < right
|
| 299 |
+
return self.is_expr_static_and_true(expr)
|
| 300 |
+
|
| 301 |
+
# See Note - [On Statically Known]
|
| 302 |
+
def statically_known_multiple_of(self, numerator: Expr, denominator: Expr) -> bool:
|
| 303 |
+
"""
|
| 304 |
+
Return a bool indicating if it is sound to optimize for the numerator being a multiple of the denominator.
|
| 305 |
+
"""
|
| 306 |
+
expr = sympy.Eq(numerator % denominator, 0)
|
| 307 |
+
return self.is_expr_static_and_true(expr)
|
| 308 |
+
|
| 309 |
+
# The guard functions require you to ALREADY KNOW that a particular
|
| 310 |
+
# condition holds. If you don't know (you want to guard on an expression
|
| 311 |
+
# being a particular value, and then get access to that value), use
|
| 312 |
+
# the evaluate functions.
|
| 313 |
+
|
| 314 |
+
def guard_equals(self, left: Expr, right: Expr) -> Expr:
|
| 315 |
+
if isinstance(left, Expr):
|
| 316 |
+
left = sympy_subs(left, self.inv_precomputed_replacements)
|
| 317 |
+
if isinstance(right, Expr):
|
| 318 |
+
right = sympy_subs(right, self.inv_precomputed_replacements)
|
| 319 |
+
assert self.shape_env.evaluate_expr(sympy.Eq(left, right))
|
| 320 |
+
return left
|
| 321 |
+
|
| 322 |
+
def guard_leq(self, left: Expr, right: Expr) -> None:
|
| 323 |
+
return self.guard_lt(left, right + 1)
|
| 324 |
+
|
| 325 |
+
def guard_lt(self, left: Expr, right: Expr) -> None:
|
| 326 |
+
assert self.shape_env.evaluate_expr(sympy.Lt(left, right))
|
| 327 |
+
|
| 328 |
+
# The evaluate functions evaluate some symbolic sympy expression
|
| 329 |
+
# (NB: not necessarily an Expr) and return what the concrete result
|
| 330 |
+
# is, guarding on the expression being that result
|
| 331 |
+
|
| 332 |
+
# NB: write evaluate_expr(sympy.Lt(a, b)) rather than evaluate_expr(a < b)
|
| 333 |
+
# as this will ensure that you actually have a sympy'ified expression,
|
| 334 |
+
# and will prevent you from incorrectly writing evaluate_expr(a == b)
|
| 335 |
+
# which does the wrong thing if a or b is a sympy expression
|
| 336 |
+
def evaluate_expr(self, left: Union[Expr, sympy.logic.boolalg.Boolean]) -> bool:
|
| 337 |
+
assert isinstance(left, (Expr, sympy.logic.boolalg.Boolean)), type(left)
|
| 338 |
+
return self.shape_env.evaluate_expr(sympy.sympify(left))
|
| 339 |
+
|
| 340 |
+
def evaluate_min(self, left: Expr, right: Expr) -> Expr:
|
| 341 |
+
"""return the smaller of left and right, and guard on that choice"""
|
| 342 |
+
lv = self.size_hint(left)
|
| 343 |
+
rv = self.size_hint(right)
|
| 344 |
+
if lv == rv:
|
| 345 |
+
return self.guard_equals(left, right)
|
| 346 |
+
elif lv < rv:
|
| 347 |
+
self.guard_lt(left, right)
|
| 348 |
+
return left
|
| 349 |
+
else:
|
| 350 |
+
self.guard_lt(right, left)
|
| 351 |
+
return right
|
| 352 |
+
|
| 353 |
+
def evaluate_static_shape(self, left: Expr) -> int:
|
| 354 |
+
right = self.size_hint(left)
|
| 355 |
+
self.guard_equals(left, sympy.Integer(right))
|
| 356 |
+
return int(right)
|
| 357 |
+
|
| 358 |
+
def evaluate_static_shapes(self, left: List[Expr]) -> List[int]:
|
| 359 |
+
return [self.evaluate_static_shape(x) for x in left]
|
| 360 |
+
|
| 361 |
+
def symbolic_hint(self, expr: Expr) -> Expr:
|
| 362 |
+
# Substitute all hints into expr, but leave unbacked symints alone
|
| 363 |
+
if not isinstance(expr, Expr):
|
| 364 |
+
assert isinstance(expr, int)
|
| 365 |
+
return expr
|
| 366 |
+
free_symbols = expr.free_symbols
|
| 367 |
+
if not free_symbols:
|
| 368 |
+
return int(expr)
|
| 369 |
+
while any(s.name.startswith("ps") for s in free_symbols):
|
| 370 |
+
expr = sympy_subs(expr, self.inv_precomputed_replacements)
|
| 371 |
+
free_symbols = expr.free_symbols
|
| 372 |
+
return sympy_subs(expr, self.var_to_val)
|
| 373 |
+
|
| 374 |
+
def size_hint(self, expr: Expr) -> int:
|
| 375 |
+
out = self.symbolic_hint(expr)
|
| 376 |
+
try:
|
| 377 |
+
return int(out)
|
| 378 |
+
except Exception:
|
| 379 |
+
log.debug("failed on: %s", out)
|
| 380 |
+
raise
|
| 381 |
+
|
| 382 |
+
def size_hints(self, exprs: List[Expr]) -> Tuple[int, ...]:
|
| 383 |
+
return tuple(self.size_hint(x) for x in exprs)
|
| 384 |
+
|
| 385 |
+
def _lru_cache(self, fn, maxsize=None):
|
| 386 |
+
"""
|
| 387 |
+
Wrapper around functools.lru_cache that clears when replacements
|
| 388 |
+
has been invalidated.
|
| 389 |
+
"""
|
| 390 |
+
fn_cache = functools.lru_cache(maxsize)(fn)
|
| 391 |
+
prior_len = len(self.replacements)
|
| 392 |
+
|
| 393 |
+
@functools.wraps(fn)
|
| 394 |
+
def wrapper(*args, **kwargs):
|
| 395 |
+
nonlocal prior_len
|
| 396 |
+
if prior_len != len(self.replacements):
|
| 397 |
+
prior_len = len(self.replacements)
|
| 398 |
+
fn_cache.cache_clear()
|
| 399 |
+
return fn_cache(*args, **kwargs)
|
| 400 |
+
|
| 401 |
+
return wrapper
|
| 402 |
+
|
| 403 |
+
def make_stride_vars_cache(self):
|
| 404 |
+
cache = self._lru_cache(self._stride_vars)
|
| 405 |
+
|
| 406 |
+
def stride_vars(
|
| 407 |
+
index: Expr,
|
| 408 |
+
vars: List[sympy.Symbol],
|
| 409 |
+
support_vars: List[sympy.Symbol] = None,
|
| 410 |
+
) -> List[Expr]:
|
| 411 |
+
if not support_vars:
|
| 412 |
+
support_vars = vars
|
| 413 |
+
return cache(index, tuple(vars), tuple(support_vars))
|
| 414 |
+
|
| 415 |
+
return stride_vars
|
| 416 |
+
|
| 417 |
+
def _stride_vars(
|
| 418 |
+
self, index: Expr, vars: List[sympy.Symbol], support_vars: List[sympy.Symbol]
|
| 419 |
+
) -> List[Expr]:
|
| 420 |
+
"""Convert an indexing expression back into strides
|
| 421 |
+
|
| 422 |
+
NOTE: This is only valid if the index is a standard strided offset
|
| 423 |
+
calculation. e.g. 10 * ModularIndexing(i0 + 1, 1, 2) would give a
|
| 424 |
+
stride of -10 because the index wraps around after the first element
|
| 425 |
+
|
| 426 |
+
"""
|
| 427 |
+
strides = []
|
| 428 |
+
index = self.simplify(index)
|
| 429 |
+
# remove any offset
|
| 430 |
+
index = index - sympy_subs(
|
| 431 |
+
index, {v: sympy.Integer(0) for v in support_vars if v != 0}
|
| 432 |
+
)
|
| 433 |
+
for i in range(len(vars)):
|
| 434 |
+
# drop all the other dims
|
| 435 |
+
index_dim = sympy_subs(
|
| 436 |
+
index,
|
| 437 |
+
{
|
| 438 |
+
support_vars[j]: sympy.Integer(0)
|
| 439 |
+
for j in range(len(support_vars))
|
| 440 |
+
if vars[i] != support_vars[j] and support_vars[j] != 0
|
| 441 |
+
},
|
| 442 |
+
)
|
| 443 |
+
v = vars[i]
|
| 444 |
+
if v == 0:
|
| 445 |
+
strides.append(sympy.Integer(0))
|
| 446 |
+
else:
|
| 447 |
+
# TODO(jansel): should we use sympy.diff here?
|
| 448 |
+
strides.append(
|
| 449 |
+
sympy_subs(index_dim, {v: sympy.Integer(1)})
|
| 450 |
+
- sympy_subs(index_dim, {v: sympy.Integer(0)})
|
| 451 |
+
)
|
| 452 |
+
return strides
|
| 453 |
+
|
| 454 |
+
def offset_var(self, index: Expr, vars: List[sympy.Symbol]) -> Expr:
|
| 455 |
+
"""Extract offset part of an indexing expression"""
|
| 456 |
+
index = self.simplify(index)
|
| 457 |
+
return sympy_subs(index, {v: sympy.Integer(0) for v in vars if v != 0})
|
| 458 |
+
|
| 459 |
+
def stride_hints(
|
| 460 |
+
self,
|
| 461 |
+
index: Expr,
|
| 462 |
+
vars: List[sympy.Symbol],
|
| 463 |
+
support_vars: List[sympy.Symbol] = None,
|
| 464 |
+
) -> List[int]:
|
| 465 |
+
for v in index.free_symbols:
|
| 466 |
+
if v.name.startswith("indirect"):
|
| 467 |
+
index = sympy_subs(index, {v: 0})
|
| 468 |
+
result = []
|
| 469 |
+
for s in self.stride_vars(index, vars, support_vars):
|
| 470 |
+
try:
|
| 471 |
+
result.append(self.size_hint(s))
|
| 472 |
+
except TypeError:
|
| 473 |
+
result.append(0)
|
| 474 |
+
return result
|
| 475 |
+
|
| 476 |
+
def stride_order(self, index: Expr, vars: List[sympy.Symbol]) -> List[int]:
|
| 477 |
+
strides = tuple(
|
| 478 |
+
map(abs, self.stride_hints(index, vars))
|
| 479 |
+
) # lambda to placate mypy
|
| 480 |
+
order = list(range(len(strides)))
|
| 481 |
+
order.sort(key=lambda x: (strides[x] == 0, strides[x]))
|
| 482 |
+
return order
|
| 483 |
+
|
| 484 |
+
def lookup_precomputed_size(self, expr: Expr):
|
| 485 |
+
if expr not in self.precomputed_replacements:
|
| 486 |
+
sym = sympy_symbol(f"ps{len(self.precomputed_replacements)}")
|
| 487 |
+
self.precomputed_replacements[expr] = sym
|
| 488 |
+
self.inv_precomputed_replacements[sym] = expr
|
| 489 |
+
return self.precomputed_replacements[expr]
|
| 490 |
+
|
| 491 |
+
|
| 492 |
+
def join_dimensions(expr: Expr) -> Expr:
|
| 493 |
+
if not isinstance(expr, sympy.Add) or not expr.has(ModularIndexing):
|
| 494 |
+
return expr # fast exit path
|
| 495 |
+
return _join_dimensions_cached(expr)
|
| 496 |
+
|
| 497 |
+
|
| 498 |
+
@functools.lru_cache(256)
|
| 499 |
+
def _join_dimensions_cached(expr: Expr) -> Expr:
|
| 500 |
+
"""
|
| 501 |
+
ModularIndexing(i0, 1, 32) + 32 * ModularIndexing(i0, 32, 4)
|
| 502 |
+
becomes
|
| 503 |
+
ModularIndexing(i0, 1, 128)
|
| 504 |
+
ModularIndexing(i0, 1, 32) + 32 * FloorDiv(i0, 32)
|
| 505 |
+
becomes i0
|
| 506 |
+
|
| 507 |
+
|
| 508 |
+
This type of pattern can come from view operations
|
| 509 |
+
"""
|
| 510 |
+
assert isinstance(expr, sympy.Add)
|
| 511 |
+
|
| 512 |
+
scale = sympy.Wild("scale", exclude=[0])
|
| 513 |
+
base = sympy.Wild("base")
|
| 514 |
+
divisor = sympy.Wild("divisor")
|
| 515 |
+
mod1 = sympy.Wild("modulus")
|
| 516 |
+
mod2 = sympy.Wild("modulus2")
|
| 517 |
+
for term1 in expr.args:
|
| 518 |
+
m1 = term1.match(scale * ModularIndexing(base, divisor, mod1))
|
| 519 |
+
if m1:
|
| 520 |
+
for term2 in expr.args:
|
| 521 |
+
m2 = term2.match(
|
| 522 |
+
m1[scale]
|
| 523 |
+
* m1[mod1]
|
| 524 |
+
* ModularIndexing(m1[base], m1[divisor] * m1[mod1], mod2)
|
| 525 |
+
)
|
| 526 |
+
if m2 and term1 != term2:
|
| 527 |
+
expr = join_dimensions(
|
| 528 |
+
expr
|
| 529 |
+
- term1
|
| 530 |
+
- term2
|
| 531 |
+
+ m1[scale]
|
| 532 |
+
* ModularIndexing(m1[base], m1[divisor], m1[mod1] * m2[mod2])
|
| 533 |
+
)
|
| 534 |
+
return expr
|
| 535 |
+
for term1 in expr.args:
|
| 536 |
+
m1 = term1.match(scale * ModularIndexing(base, divisor, mod1))
|
| 537 |
+
if m1:
|
| 538 |
+
for term2 in expr.args:
|
| 539 |
+
m2 = term2.match(
|
| 540 |
+
m1[scale] * m1[mod1] * FloorDiv(m1[base], m1[divisor] * m1[mod1])
|
| 541 |
+
)
|
| 542 |
+
if m2 is not None: # in case of success we get an empty dict here
|
| 543 |
+
expr = join_dimensions(
|
| 544 |
+
expr
|
| 545 |
+
- term1
|
| 546 |
+
- term2
|
| 547 |
+
+ m1[scale] * FloorDiv(m1[base], m1[divisor])
|
| 548 |
+
)
|
| 549 |
+
return expr
|
| 550 |
+
return expr
|
| 551 |
+
|
| 552 |
+
|
| 553 |
+
class SimplifyIndexing(V.WrapperHandler): # type: ignore[name-defined]
|
| 554 |
+
"""
|
| 555 |
+
A wrapper around .virtualize.ops that uses var range information to
|
| 556 |
+
simplify ModularIndexing/FloorDiv.
|
| 557 |
+
"""
|
| 558 |
+
|
| 559 |
+
def __init__(self, inner, var_ranges: VarRanges):
|
| 560 |
+
super().__init__(inner)
|
| 561 |
+
self.name = "SimplifyIndexing"
|
| 562 |
+
self._simplify: Callable[
|
| 563 |
+
[Expr], Expr
|
| 564 |
+
] = lambda index: V.graph.sizevars.simplify_with_ranges(index, var_ranges)
|
| 565 |
+
|
| 566 |
+
def load(self, name: str, index: sympy.Expr):
|
| 567 |
+
return self._inner.load(name, self._simplify(index))
|
| 568 |
+
|
| 569 |
+
def store(self, name, index, value, mode=None):
|
| 570 |
+
return self._inner.store(name, self._simplify(index), value, mode=mode)
|
| 571 |
+
|
| 572 |
+
def store_reduction(self, name, index, value):
|
| 573 |
+
return self._inner.store_reduction(name, self._simplify(index), value)
|
| 574 |
+
|
| 575 |
+
def index_expr(self, index, dtype):
|
| 576 |
+
return self._inner.index_expr(self._simplify(index), dtype)
|
llava_next/lib/python3.10/site-packages/torch/_inductor/test_operators.py
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch.library
|
| 2 |
+
from torch import Tensor
|
| 3 |
+
from torch.autograd import Function
|
| 4 |
+
|
| 5 |
+
_test_lib_def = torch.library.Library("_inductor_test", "DEF")
|
| 6 |
+
_test_lib_def.define("realize(Tensor self) -> Tensor")
|
| 7 |
+
|
| 8 |
+
_test_lib_impl = torch.library.Library("_inductor_test", "IMPL")
|
| 9 |
+
for dispatch_key in ("CPU", "CUDA", "Meta"):
|
| 10 |
+
_test_lib_impl.impl("realize", lambda x: x.clone(), dispatch_key)
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class Realize(Function):
|
| 14 |
+
@staticmethod
|
| 15 |
+
def forward(ctx, x):
|
| 16 |
+
return torch.ops._inductor_test.realize(x)
|
| 17 |
+
|
| 18 |
+
@staticmethod
|
| 19 |
+
def backward(ctx, grad_output):
|
| 20 |
+
return grad_output
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def realize(x: Tensor) -> Tensor:
|
| 24 |
+
return Realize.apply(x)
|
llava_next/lib/python3.10/site-packages/torch/_inductor/triton_helpers.py
ADDED
|
@@ -0,0 +1,182 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import triton
|
| 2 |
+
import triton.language as tl
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
@triton.jit
|
| 6 |
+
def promote_to_tensor(x):
|
| 7 |
+
# Addition promotes to tensor for us
|
| 8 |
+
return x + tl.zeros((1,), tl.int1)
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
@triton.jit
|
| 12 |
+
def is_floating(x):
|
| 13 |
+
return promote_to_tensor(x).dtype.is_floating()
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
@triton.jit
|
| 17 |
+
def _prod_accumulate(a, b):
|
| 18 |
+
return a * b
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
@triton.jit
|
| 22 |
+
def prod(input, axis):
|
| 23 |
+
return tl.reduce(input, axis, _prod_accumulate)
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
@triton.jit
|
| 27 |
+
def minimum(a, b):
|
| 28 |
+
mask = a < b
|
| 29 |
+
if is_floating(a):
|
| 30 |
+
mask |= a != a
|
| 31 |
+
return tl.where(mask, a, b)
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
@triton.jit
|
| 35 |
+
def maximum(a, b):
|
| 36 |
+
mask = a > b
|
| 37 |
+
if is_floating(a):
|
| 38 |
+
mask |= a != a
|
| 39 |
+
return tl.where(mask, a, b)
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
@triton.jit
|
| 43 |
+
def min2(a, dim):
|
| 44 |
+
return tl.reduce(a, dim, minimum)
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
@triton.jit
|
| 48 |
+
def max2(a, dim):
|
| 49 |
+
return tl.reduce(a, dim, maximum)
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
@triton.jit
|
| 53 |
+
def minimum_with_index(a_value, a_index, b_value, b_index):
|
| 54 |
+
mask = a_value < b_value
|
| 55 |
+
equal = a_value == b_value
|
| 56 |
+
if is_floating(a_value):
|
| 57 |
+
a_isnan = a_value != a_value
|
| 58 |
+
b_isnan = b_value != b_value
|
| 59 |
+
mask |= a_isnan and not b_isnan
|
| 60 |
+
# Consider NaNs as equal
|
| 61 |
+
equal |= a_isnan and b_isnan
|
| 62 |
+
|
| 63 |
+
# Prefer lowest index if values are equal
|
| 64 |
+
mask |= equal & (a_index < b_index)
|
| 65 |
+
return tl.where(mask, a_value, b_value), tl.where(mask, a_index, b_index)
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
@triton.jit
|
| 69 |
+
def maximum_with_index(a_value, a_index, b_value, b_index):
|
| 70 |
+
mask = a_value > b_value
|
| 71 |
+
equal = a_value == b_value
|
| 72 |
+
if is_floating(a_value):
|
| 73 |
+
a_isnan = a_value != a_value
|
| 74 |
+
b_isnan = b_value != b_value
|
| 75 |
+
mask |= a_isnan and not b_isnan
|
| 76 |
+
# Consider NaNs as equal
|
| 77 |
+
equal |= a_isnan and b_isnan
|
| 78 |
+
|
| 79 |
+
# Prefer lowest index if values are equal
|
| 80 |
+
mask |= equal & (a_index < b_index)
|
| 81 |
+
return tl.where(mask, a_value, b_value), tl.where(mask, a_index, b_index)
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
@triton.jit
|
| 85 |
+
def min_with_index(value, index, dim):
|
| 86 |
+
return tl.reduce((value, index), dim, minimum_with_index)
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
@triton.jit
|
| 90 |
+
def max_with_index(value, index, dim):
|
| 91 |
+
return tl.reduce((value, index), dim, maximum_with_index)
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
@triton.jit
|
| 95 |
+
def welford_reduce(value, mean, m2, weight):
|
| 96 |
+
delta = value - mean
|
| 97 |
+
new_weight = weight + 1
|
| 98 |
+
new_mean = mean + delta / new_weight
|
| 99 |
+
return (
|
| 100 |
+
new_mean,
|
| 101 |
+
m2 + delta * (value - new_mean),
|
| 102 |
+
new_weight,
|
| 103 |
+
)
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
@triton.jit
|
| 107 |
+
def welford_combine(mean_1, m2_1, weight_1, mean_2, m2_2, weight_2):
|
| 108 |
+
delta = mean_2 - mean_1
|
| 109 |
+
new_weight = weight_1 + weight_2
|
| 110 |
+
w2_over_w = tl.where(new_weight == 0.0, 0.0, weight_2 / new_weight)
|
| 111 |
+
return (
|
| 112 |
+
mean_1 + delta * w2_over_w,
|
| 113 |
+
m2_1 + m2_2 + delta * delta * weight_1 * w2_over_w,
|
| 114 |
+
new_weight,
|
| 115 |
+
)
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
@triton.jit
|
| 119 |
+
def welford(mean, m2, weight, dim):
|
| 120 |
+
return tl.reduce((mean, m2, weight), dim, welford_combine)
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
@triton.jit
|
| 124 |
+
def device_assert_then(cond, msg, r):
|
| 125 |
+
tl.device_assert(cond, msg)
|
| 126 |
+
return r
|
| 127 |
+
|
| 128 |
+
|
| 129 |
+
@triton.jit
|
| 130 |
+
def randint64(seed, offset, low, high):
|
| 131 |
+
r0, r1, r2, r3 = tl.randint4x(seed, offset)
|
| 132 |
+
r0 = r0.to(tl.uint64)
|
| 133 |
+
r1 = r1.to(tl.uint64)
|
| 134 |
+
result = r0 | (r1 << 32)
|
| 135 |
+
size = high - low
|
| 136 |
+
result = result % size.to(tl.uint64)
|
| 137 |
+
result = result.to(tl.int64) + low
|
| 138 |
+
return result
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
@triton.jit
|
| 142 |
+
def _any_combine(a, b):
|
| 143 |
+
return a | b
|
| 144 |
+
|
| 145 |
+
|
| 146 |
+
@triton.jit
|
| 147 |
+
def any(a, dim):
|
| 148 |
+
return tl.reduce(a, dim, _any_combine)
|
| 149 |
+
|
| 150 |
+
|
| 151 |
+
@triton.jit
|
| 152 |
+
def bucketize_binary_search(
|
| 153 |
+
values, # 1D tensor
|
| 154 |
+
offsets_ptr,
|
| 155 |
+
indexing_dtype,
|
| 156 |
+
right, # bool: if true, use intervals closed on the left; see [Note: Inductor bucketize op]
|
| 157 |
+
OFFSETS_SIZE: int,
|
| 158 |
+
BLOCK_SHAPE, # tuple/list of block shape
|
| 159 |
+
):
|
| 160 |
+
"""
|
| 161 |
+
See [Note: Inductor bucketize op]
|
| 162 |
+
"""
|
| 163 |
+
|
| 164 |
+
low = tl.zeros(BLOCK_SHAPE, dtype=indexing_dtype)
|
| 165 |
+
high = tl.full(BLOCK_SHAPE, OFFSETS_SIZE, dtype=indexing_dtype)
|
| 166 |
+
|
| 167 |
+
full_range = OFFSETS_SIZE + 1
|
| 168 |
+
while full_range > 1:
|
| 169 |
+
mid = (high + low) // 2
|
| 170 |
+
mask = mid < OFFSETS_SIZE
|
| 171 |
+
bucket_upper_bound = tl.load(offsets_ptr + mid, mask=mask)
|
| 172 |
+
if right:
|
| 173 |
+
is_above = values >= bucket_upper_bound
|
| 174 |
+
else:
|
| 175 |
+
is_above = values > bucket_upper_bound
|
| 176 |
+
|
| 177 |
+
low = tl.where(is_above & mask, mid + 1, low)
|
| 178 |
+
high = tl.where(is_above, high, mid)
|
| 179 |
+
|
| 180 |
+
full_range = (full_range + 1) // 2
|
| 181 |
+
|
| 182 |
+
return low
|
llava_next/lib/python3.10/site-packages/torch/_inductor/triton_heuristics.py
ADDED
|
@@ -0,0 +1,1046 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import builtins
|
| 2 |
+
import copy
|
| 3 |
+
import functools
|
| 4 |
+
import hashlib
|
| 5 |
+
import inspect
|
| 6 |
+
import json
|
| 7 |
+
import logging
|
| 8 |
+
import operator
|
| 9 |
+
import os
|
| 10 |
+
import os.path
|
| 11 |
+
import re
|
| 12 |
+
import threading
|
| 13 |
+
from enum import auto, Enum
|
| 14 |
+
from typing import Any, Callable, List, Optional, Set, Tuple
|
| 15 |
+
|
| 16 |
+
import torch
|
| 17 |
+
|
| 18 |
+
import torch.autograd.profiler as autograd_profiler
|
| 19 |
+
from torch._dynamo.utils import dynamo_timed
|
| 20 |
+
|
| 21 |
+
from . import config
|
| 22 |
+
from .codecache import cache_dir, CudaKernelParamCache
|
| 23 |
+
from .coordinate_descent_tuner import CoordescTuner
|
| 24 |
+
|
| 25 |
+
from .ir import ReductionHint, TileHint
|
| 26 |
+
from .utils import (
|
| 27 |
+
ceildiv,
|
| 28 |
+
conditional_product,
|
| 29 |
+
create_bandwidth_info_str,
|
| 30 |
+
do_bench,
|
| 31 |
+
get_num_bytes,
|
| 32 |
+
has_triton,
|
| 33 |
+
next_power_of_2,
|
| 34 |
+
triton_config_to_hashable,
|
| 35 |
+
)
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
log = logging.getLogger(__name__)

# Import triton conditionally: when triton is unavailable (e.g. CPU-only
# builds) these names are replaced with inert placeholders so this module can
# still be imported.
if has_triton():
    import triton
    from triton import Config
    from triton.runtime.jit import get_cuda_stream, KernelInterface
else:
    Config = object
    get_cuda_stream = None
    KernelInterface = object
    triton = None
class HeuristicType(Enum):
    """Which codegen heuristic produced a kernel; selects tuning behavior."""

    POINTWISE = auto()
    REDUCTION = auto()
    PERSISTENT_REDUCTION = auto()
    TEMPLATE = auto()
class AutotuneHint(Enum):
    """Hints attached to kernel metadata suggesting extra configs to autotune."""

    ELEMENTS_PER_WARP_32 = 0

    # Triton codegen tries to codegen set of AutotuneHints.
    # Enum.__repr__ looks like "<AutotuneHint.ELEMENTS_PER_WARP_32: 0>""
    # which isn't valid python.
    # Enum.__str__ will just return "AutotuneHint.ELEMENTS_PER_WARP_32".
    __repr__ = Enum.__str__
def autotune_hints_to_configs(
    hints: Set[AutotuneHint], size_hints, block_size
) -> List[Config]:
    """
    AutotuneHints can be attached to the metadata of triton kernels for providing
    suggestions about what to try for autotuning. One reason to do this is if there are
    some configs that are only useful in specific scenarios, in which case we can avoid
    wasting compile time on autotuning unless we know we are in one of those scenarios.

    Based on those hints, this function will generate a list of additional autotuning
    configs to try.

    size_hints: per-tile-dimension numels (1, 2 or 3 entries).
    block_size: the baseline block size the hint-derived configs are scaled from.
    """
    xyz_options: Tuple[Tuple[Any, ...], ...]
    configs = []

    for hint in hints:
        if hint == AutotuneHint.ELEMENTS_PER_WARP_32:
            if len(size_hints) == 1:
                xyz_options = ((block_size // 4,),)
            elif len(size_hints) == 2:
                xyz_options = ((block_size // 4, 1), (1, block_size // 4))
            elif len(size_hints) == 3:
                xyz_options = (
                    (block_size // 4, 1, 1),
                    (1, block_size // 4, 1),
                    (1, 1, block_size // 4),
                )
            else:
                # Bug fix: previously an unsupported number of tile dimensions
                # fell through with `xyz_options` unbound, raising
                # UnboundLocalError below. Skip hints for unsupported ranks.
                continue
            for xyz in xyz_options:
                configs.append(
                    triton_config(  # type: ignore[misc]
                        size_hints,
                        *xyz,
                        num_elements_per_warp=32,
                    )
                )

    return configs
def disable_pointwise_autotuning():
    """Return True when pointwise-kernel autotuning should be skipped."""
    # Autotuning can give different benchmarking results from run to run, so
    # it is always disabled when deterministic algorithms are requested;
    # otherwise it follows the config knob. The `or` keeps the original
    # short-circuit: config is not consulted in deterministic mode.
    deterministic = torch.are_deterministic_algorithms_enabled()
    return deterministic or not config.triton.autotune_pointwise
class CachingAutotuner(KernelInterface):
    """
    Simplified version of Triton autotuner that has no invalidation
    key and caches the best config to disk to improve cold start times.
    Unlike the main triton Autotuner, this version can precompile all
    configs, and does not rely on the Triton JIT.
    """

    def __init__(
        self,
        fn,
        meta,
        configs,
        save_cache_hook,
        mutated_arg_names,
        heuristic_type,
        size_hints=None,
    ):
        # fn: triton JITFunction to tune.
        # meta: compile metadata dict (reads "device", "kernel_name", and —
        #   per _precompile_config — "constants"; exact schema set by codegen).
        # configs: candidate triton Config objects.
        # save_cache_hook: callable persisting the winning config, or None.
        # mutated_arg_names: args the kernel writes in place; they are cloned
        #   before benchmarking so tuning cannot corrupt caller data.
        # heuristic_type: HeuristicType, controls coordinate-descent behavior.
        super().__init__()
        self.fn = fn
        self.meta = meta
        self.save_cache_hook = save_cache_hook
        self.mutated_arg_names = mutated_arg_names
        self.configs = configs
        self.heuristic_type = heuristic_type

        if log.isEnabledFor(logging.DEBUG):
            log.debug("CachingAutotuner gets %d configs", len(self.configs))
            for c in self.configs:
                log.debug(c)

        self.launchers = []
        # guards precompilation (precompile / coordinate-descent recompiles)
        self.lock = threading.Lock()
        # route triton's own on-disk cache under inductor's cache dir,
        # namespaced by device index, unless the user already set it
        if os.getenv("TRITON_CACHE_DIR") is None:
            os.environ["TRITON_CACHE_DIR"] = os.path.join(
                cache_dir(),
                "triton",
                str(self.meta.get("device", 0)),
            )

        self.coordesc_tuner = CoordescTuner(
            is_mm=False, name=self.fn.__name__, size_hints=size_hints
        )

        # pre-create the profiler context manager to reduce latency
        self.record_function_ctx = torch._C._profiler._RecordFunctionFast(
            self.meta.get("kernel_name", "triton kernel")
        )

    def precompile(self, warm_cache_only_with_cc=None):
        """Compile every candidate config (idempotent, thread-safe)."""
        with self.lock:
            if self.launchers:
                # already precompiled
                return
            self.launchers = [
                self._precompile_config(c, warm_cache_only_with_cc)
                for c in self.configs
            ]
            # configs are consumed; launchers carry their config from here on
            self.configs = None

    def _precompile_config(self, cfg: Config, warm_cache_only_with_cc: Optional[int]):
        """Ahead of time compile a given autotuner config.

        Returns a generated `launcher(...)` callable bound to the compiled
        binary; when warm_cache_only_with_cc is given, only populates the
        compile cache for that compute capability and returns None.
        """
        compile_meta = copy.deepcopy(self.meta)
        # fold the config's block sizes into the kernel's constexpr constants
        for k, v in cfg.kwargs.items():
            compile_meta["constants"][self.fn.arg_names.index(k)] = v
        compile_meta["num_warps"] = cfg.num_warps
        compile_meta["num_stages"] = cfg.num_stages
        compile_meta["debug"] = (
            config.triton.assert_indirect_indexing and torch.version.hip is None
        )

        # Setting device_type="hip" required on ROCm to pass down to triton
        compile_meta["device_type"] = "cuda" if torch.version.hip is None else "hip"

        if warm_cache_only_with_cc:
            triton.compile(
                self.fn,
                warm_cache_only=True,
                cc=warm_cache_only_with_cc,
                **compile_meta,
            )
            return

        # load binary to the correct device
        with torch.cuda.device(compile_meta["device"]):
            # need to initialize context
            torch.cuda.synchronize(torch.cuda.current_device())
            binary = triton.compile(
                self.fn,
                **compile_meta,
            )
            binary._init_handles()

        # runtime call args: everything that is not a constexpr
        call_args = [
            arg
            for i, arg in enumerate(self.fn.arg_names)
            if i not in self.fn.constexprs
        ]
        # drop trailing args that the config supplies as constants
        def_args = list(self.fn.arg_names)
        while def_args and def_args[-1] in cfg.kwargs:
            def_args.pop()

        scope = {
            "grid_meta": cfg.kwargs,
            "bin": binary,
            "torch": torch,
            "set_device": torch.cuda.set_device,
            "current_device": torch.cuda.current_device,
        }
        # Codegen a launcher with the exact positional signature so per-call
        # overhead stays minimal (no dict building / arg filtering at runtime).
        # The num_ctas branch handles newer triton c_wrapper signatures.
        exec(
            f"""
def launcher({', '.join(def_args)}, grid, stream):
    if callable(grid):
        grid_0, grid_1, grid_2 = grid(grid_meta)
    else:
        grid_0, grid_1, grid_2 = grid

    if hasattr(bin, "num_ctas"):
        bin.c_wrapper(grid_0, grid_1, grid_2, bin.num_warps,
                      bin.num_ctas, *bin.clusterDims, bin.shared,
                      stream, bin.cu_function, None, None, None,
                      {', '.join(call_args)})
    else:
        bin.c_wrapper(grid_0, grid_1, grid_2, bin.num_warps, bin.shared,
                      stream, bin.cu_function, None, None, None,
                      {', '.join(call_args)})
""".lstrip(),
            scope,
        )

        launcher = scope["launcher"]
        launcher.config = cfg
        launcher.n_regs = getattr(binary, "n_regs", None)
        launcher.n_spills = getattr(binary, "n_spills", None)
        launcher.shared = getattr(binary, "shared", None)
        launcher.store_cubin = config.triton.store_cubin
        # store this global varible to avoid the high overhead of reading it when calling run
        if launcher.store_cubin:
            launcher.fn = self.fn
            launcher.bin = binary
        return launcher

    def bench(self, launcher, *args, grid):
        """Measure the performance of a given launcher"""
        # heavy register spilling is almost never the fastest variant; skip it
        if launcher.n_spills > config.triton.spill_threshold:
            log.debug(
                "Skip config %s because of register spilling: %d",
                launcher.config,
                launcher.n_spills,
            )
            return float("inf")

        stream = get_cuda_stream(torch.cuda.current_device())

        def kernel_call():
            if launcher.config.pre_hook is not None:
                launcher.config.pre_hook(
                    {**dict(zip(self.arg_names, args)), **launcher.config.kwargs}
                )

            # clone in-place-mutated args per call so benchmarking is repeatable
            cloned_args = self.clone_args(*args)
            launcher(
                *cloned_args,
                grid=grid,
                stream=stream,
            )

        return do_bench(kernel_call, rep=40, fast_flush=True)

    def clone_args(self, *args):
        from .compile_fx import clone_preserve_strides

        # clone inplace buffers to avoid autotune contaminating them if
        # the kernel does in-place stores. avoid cloning other buffers because
        # it leads to increase memory use
        cloned_args = []
        for i, arg in enumerate(args):
            if self.fn.arg_names[i] in self.mutated_arg_names:
                assert isinstance(arg, torch.Tensor)
                cloned_args.append(clone_preserve_strides(arg))
            else:
                cloned_args.append(arg)

        return cloned_args

    @dynamo_timed
    def benchmark_all_configs(self, *args, **kwargs):
        """Benchmark every precompiled launcher; returns {launcher: ms}."""
        timings = {
            launcher: self.bench(launcher, *args, **kwargs)
            for launcher in self.launchers
        }

        # seed the coordinate-descent tuner so it won't re-benchmark these
        for k, v in timings.items():
            self.coordesc_tuner.cache_benchmark_result(k.config, v)

        if log.isEnabledFor(logging.DEBUG):
            log.debug("Benchmark all input configs get:")
            for k, v in timings.items():
                log.debug(
                    "%s: %f, nreg %d, nspill %d, #shared-mem %d",
                    k.config,
                    v,
                    k.n_regs,
                    k.n_spills,
                    k.shared,
                )

        return timings

    def autotune_to_one_config(self, *args, **kwargs):
        """Do the actual autotuning"""
        timings = self.benchmark_all_configs(*args, **kwargs)
        # keep only the fastest launcher and persist its config if possible
        self.launchers = [builtins.min(timings, key=timings.get)]
        if self.save_cache_hook:
            self.save_cache_hook(self.launchers[0].config)

    def save_cuda_kernel(self, grid, stream, launcher):
        """Record launch params + cubin for AOT/cpp-wrapper use."""
        if callable(grid):
            grid_x, grid_y, grid_z = grid(launcher.config.kwargs)
        else:
            grid_x, grid_y, grid_z = grid

        key = launcher.fn.fn.__qualname__  # unique kernel name
        params = {
            "mangled_name": launcher.bin.metadata["name"],
            "grid_x": grid_x,
            "grid_y": grid_y,
            "grid_z": grid_z,
            "num_warps": launcher.bin.num_warps,
            "shared_mem": launcher.bin.shared,
            "stream": stream,
        }
        CudaKernelParamCache.set(key, params, launcher.bin.asm["cubin"])

    def coordinate_descent_tuning(self, launcher, *args, **kwargs):
        """
        Coordinate descent tuning can be run with or without max-autotune.

        The only difference between these two is the starting config for coordinate_descent tuning.
        E.g., assuming regular autotune only get one config C1; while max-autotune get 4 configs C1, C2, C3, C4
        and max-autotune figure out C3 is the best.

        Then if coordinate descnt tuning is run with max-autotune disabled, it will start from C1;
        while if coordinate descent tuning is run with max-autotune enabled, it will start from C3.
        """
        if self.heuristic_type == HeuristicType.TEMPLATE:
            # skip triton template
            return launcher

        # clone once up front: every probe config benchmarks against the same data
        cloned_args = self.clone_args(*args)
        config2launcher = {launcher.config: launcher}

        def benchmark_one_config(config):
            # compile (under the lock) and time a single candidate config
            with self.lock:
                launcher = self._precompile_config(config, None)
            config2launcher[config] = launcher

            out = self.bench(launcher, *cloned_args, **kwargs)
            log.debug(
                "COORDESC: %s: %f, nreg %d, nspill %d, #shared-mem %d",
                launcher.config,
                out,
                launcher.n_regs,
                launcher.n_spills,
                launcher.shared,
            )
            return out

        assert not (
            self.heuristic_type == HeuristicType.PERSISTENT_REDUCTION
            and "RBLOCK" in launcher.config.kwargs
        ), "Coordinate descent tuner relies on the assumption that persistent reduction's triton config does not have RBLOCK"
        best_config = self.coordesc_tuner.autotune(
            benchmark_one_config, launcher.config, None
        )
        best_config.found_by_coordesc = True

        if self.save_cache_hook:
            self.save_cache_hook(best_config, found_by_coordesc=True)
        return config2launcher.get(best_config)

    def run(self, *args, grid, stream):
        """Launch the kernel, lazily compiling/autotuning on first call."""
        if len(self.launchers) != 1:
            if len(self.launchers) == 0:
                self.precompile()
            if len(self.launchers) > 1:
                self.autotune_to_one_config(*args, grid=grid)

        # optionally refine the winner with coordinate descent (once: the
        # resulting config is tagged found_by_coordesc)
        if (
            not getattr(self.launchers[0].config, "found_by_coordesc", False)
            and config.coordinate_descent_tuning
        ):
            self.launchers = [
                self.coordinate_descent_tuning(self.launchers[0], *args, grid=grid)
            ]

        (launcher,) = self.launchers
        if launcher.store_cubin:
            self.save_cuda_kernel(grid, stream, launcher)

        if launcher.config.pre_hook is not None:
            launcher.config.pre_hook(
                {**dict(zip(self.arg_names, args)), **launcher.config.kwargs}
            )

        # guard the record_function_ctx and only call it if profiling is currently
        # in progress, to reduce latency when profiler is not turned on. Note that
        # the "if" statement (instead of, say, a contextlib.nullcontext) is intentional;
        # it is faster than entering and exiting a context manager, even if the context
        # manager is a nullcontext.
        if autograd_profiler._is_profiler_enabled:
            with self.record_function_ctx:
                return launcher(
                    *args,
                    grid=grid,
                    stream=stream,
                )
        else:
            return launcher(
                *args,
                grid=grid,
                stream=stream,
            )
def _find_names(obj):
|
| 441 |
+
import gc
|
| 442 |
+
import inspect
|
| 443 |
+
|
| 444 |
+
frame = inspect.currentframe()
|
| 445 |
+
for frame in iter(lambda: frame.f_back, None): # type: ignore[union-attr]
|
| 446 |
+
frame.f_locals
|
| 447 |
+
obj_names = []
|
| 448 |
+
for referrer in gc.get_referrers(obj):
|
| 449 |
+
if isinstance(referrer, dict):
|
| 450 |
+
for k, v in referrer.items():
|
| 451 |
+
if v is obj:
|
| 452 |
+
obj_names.append(k)
|
| 453 |
+
return obj_names
|
| 454 |
+
|
| 455 |
+
|
| 456 |
+
# Calls recorded by DebugAutotuner between start_graph()/end_graph():
# tuples of (ms, num_gb, gb_per_s, kernel_name).
collected_calls: List[Any] = []


def start_graph():
    """Reset the per-graph bandwidth-profiling log."""
    collected_calls.clear()
def end_graph():
    """Print an aggregate bandwidth summary of every call recorded since start_graph()."""
    if not collected_calls:
        return
    overall_time = sum(ms for ms, *_rest in collected_calls)
    overall_gb = sum(gb for _ms, gb, *_rest in collected_calls)
    # report which file triggered the summary (the caller's source file)
    cur_file = inspect.stack()[1].filename
    print(f"SUMMARY ({cur_file})")
    print(
        f"{overall_time:.2f}ms \t {overall_gb:.2f} GB\t {overall_gb/(overall_time/1e3):.2f}GB/s"
    )
    print()
class DebugAutotuner(CachingAutotuner):
    """CachingAutotuner variant that also measures and prints memory bandwidth.

    Used when config.profile_bandwidth is set; each run appends
    (ms, num_gb, gb_per_s, kernel_name) to the module-level
    ``collected_calls`` for ``end_graph`` to summarize.
    """

    def __init__(self, *args, regex_filter="", **kwargs):
        # regex_filter: only kernels whose recovered name matches are run.
        self.regex_filter = regex_filter
        super().__init__(*args, **kwargs)
        # (ms, num_gb, gb_per_s, kernel_name) memoized after first benchmark
        self.cached = None

    def run(self, *args, grid, stream):
        # Recover a human-readable kernel name: the longest variable name
        # currently bound to this autotuner object.
        possible_names = _find_names(self)
        kernel_name = f"{max(possible_names, key=lambda x: len(x))}"
        if not re.match(self.regex_filter, kernel_name):
            return
        super().run(*args, grid=grid, stream=stream)
        (launcher,) = self.launchers

        if self.cached is None:
            ms = self.bench(launcher, *args, grid=grid)
            # in/out pointers are both read and written; get_num_bytes counts
            # them twice via num_in_out_args
            num_in_out_ptrs = len(
                [
                    arg_name
                    for arg_name in self.fn.arg_names
                    if arg_name.startswith("in_out_ptr")
                ]
            )
            num_gb = get_num_bytes(*args, num_in_out_args=num_in_out_ptrs) / 1e9
            gb_per_s = num_gb / (ms / 1e3)
            self.cached = (ms, num_gb, gb_per_s, kernel_name)
        else:
            # benchmark only once per kernel; reuse cached measurements
            ms, num_gb, gb_per_s, kernel_name = self.cached
        collected_calls.append((ms, num_gb, gb_per_s, kernel_name))
        print(
            create_bandwidth_info_str(ms, num_gb, gb_per_s, suffix=f" \t {kernel_name}")
        )
def hash_configs(configs: List[Config]):
    """
    Hash used to check for changes in configurations
    """
    digest = hashlib.sha256()
    for cfg in configs:
        # kwargs are sorted so the hash is insensitive to insertion order
        entry = f"{sorted(cfg.kwargs.items())} {cfg.num_warps} {cfg.num_stages}\n"
        digest.update(entry.encode())
    return digest.hexdigest()
def load_cached_autotuning(
    cache_filename: str, configs_hash: str, configs: List[Config]
):
    """
    Read a cached autotuning result from disk

    Returns the matching Config, or None when there is no cache, the cache is
    stale (hash mismatch), or it matches zero / multiple candidates.
    """
    if not os.path.exists(cache_filename):
        return None

    with open(cache_filename) as fd:
        best_config = json.loads(fd.read())
    # stale cache: the candidate-config set changed since this was written
    if best_config.pop("configs_hash", None) != configs_hash:
        return None

    if config.coordinate_descent_tuning and best_config.pop("found_by_coordesc", False):
        # A coordinate-descent result need not match any candidate config;
        # rebuild it directly from the cached kwargs.
        # (renamed from `triton_config`, which shadowed the module-level
        # function of the same name)
        warps = best_config.pop("num_warps")
        stages = best_config.pop("num_stages")
        coordesc_config = Config(best_config, num_warps=warps, num_stages=stages)
        coordesc_config.found_by_coordesc = True
        return coordesc_config

    matches = []
    for cfg in configs:
        kwargs_agree = all(
            best_config.get(key) == val for key, val in cfg.kwargs.items()
        )
        if (
            kwargs_agree
            and cfg.num_warps == best_config.get("num_warps")
            and cfg.num_stages == best_config.get("num_stages")
        ):
            matches.append(cfg)
    # require a unique match, otherwise fall back to autotuning
    if len(matches) != 1:
        return None

    return matches[0]
def cached_autotune(
    size_hints: Optional[List[int]],
    configs: List[Config],
    meta,
    heuristic_type,
    filename=None,
):
    """
    A copy of triton.autotune that calls our subclass. Our subclass
    has additional debugging, error handling, and on-disk caching.

    Returns a decorator wrapping a triton JITFunction in a CachingAutotuner
    (or DebugAutotuner when config.profile_bandwidth is on). `filename` is the
    generated kernel's source file; the winning config is persisted next to it
    as "<filename>.best_config".
    """
    configs = unique_configs(configs)
    # multiple candidates require a filename so the winner can be cached
    assert len(configs) == 1 or filename
    save_cache_hook: Optional[Callable[[Any, Any], Any]]

    # on disk caching logic
    if filename is not None and (len(configs) > 1 or config.coordinate_descent_tuning):
        cache_filename = os.path.splitext(filename)[0] + ".best_config"
        configs_hash = hash_configs(configs)
        best_config = load_cached_autotuning(cache_filename, configs_hash, configs)
        if best_config:
            # cache hit: skip autotuning entirely
            configs = [best_config]

        def save_cache_hook(cfg, found_by_coordesc=False):
            # Persist the winning config plus the configs hash, so the cache
            # invalidates itself when the candidate set changes.
            with open(cache_filename, "w") as fd:
                fd.write(
                    json.dumps(
                        {
                            **cfg.kwargs,
                            "num_warps": cfg.num_warps,
                            "num_stages": cfg.num_stages,
                            "configs_hash": configs_hash,
                            "found_by_coordesc": found_by_coordesc,
                        }
                    )
                )
            if log.isEnabledFor(logging.DEBUG):
                type_str = "coordesc" if found_by_coordesc else "heuristic"
                log.debug("Save %s tuning result to %s", type_str, cache_filename)

    else:
        save_cache_hook = None

    mutated_arg_names = meta.pop("mutated_arg_names", ())

    def decorator(fn):
        # Remove XBLOCK from config if it's not a function argument.
        # This way, coordinate descent tuning will not try to tune it.
        #
        # Context: When TritonKernel.no_x_dim is True, we hardcode XBLOCK to 1.
        import inspect

        if "XBLOCK" not in inspect.signature(fn.fn).parameters:
            for tconfig in configs:
                if "XBLOCK" in tconfig.kwargs:
                    assert tconfig.kwargs["XBLOCK"] == 1
                    tconfig.kwargs.pop("XBLOCK")

        if config.profile_bandwidth:
            return DebugAutotuner(
                fn,
                meta=meta,
                regex_filter=config.profile_bandwidth_regex,
                configs=configs,
                save_cache_hook=save_cache_hook,
                mutated_arg_names=mutated_arg_names,
                heuristic_type=heuristic_type,
                size_hints=size_hints,
            )
        return CachingAutotuner(
            fn,
            meta=meta,
            configs=configs,
            save_cache_hook=save_cache_hook,
            mutated_arg_names=mutated_arg_names,
            heuristic_type=heuristic_type,
            size_hints=size_hints,
        )

    return decorator
def unique_configs(configs: List[Config]):
    """Remove duplicate configurations"""
    # Dict keyed on the hashable form: keeps the first occurrence of each
    # config and preserves the original ordering.
    deduped = {}
    for candidate in configs:
        deduped.setdefault(triton_config_to_hashable(candidate), candidate)
    return list(deduped.values())
def check_config(cfg, *, xnumel=None, ynumel=None, znumel=None):
    """Assert that the block sizes in ``cfg`` are consistent with the numels.

    A ``None`` numel means "don't check that axis".
    """

    def _check_axis(label, numel):
        # one axis: BLOCK must be 1 when numel is 1, and must divide max_block
        block = cfg[f"{label}BLOCK"]
        if numel == 1:
            assert block == 1, (
                f"TritonKernel.indexing assumes numel == 1 => BLOCK == 1"
                f" but {label.lower()}numel=={numel} and {label}BLOCK={block} (cfg={cfg})."
            )
        max_block = config.triton.max_block[label]
        max_block_str = f'config.triton.max_block["{label}"]'
        assert max_block % block == 0, (
            f"TritonKernel.indexing assumes {label}BLOCK divides {max_block_str}"
            f" but {label}BLOCK={block} and {max_block_str}={max_block} (cfg={cfg})."
        )

    for label, numel in zip("XYZ", (xnumel, ynumel, znumel)):
        if numel is not None:
            _check_axis(label, numel)
def triton_config(
    size_hints, x, y=None, z=None, num_stages=1, num_elements_per_warp=256
) -> Config:
    """
    Construct a pointwise triton config with some adjustment heuristics
    based on size_hints. Size_hints is a tuple of numels in each tile
    dimension and will be rounded up to the nearest power of 2.

    num_elements_per_warp is a suggestion for controlling how many warps
    the triton config should contain. e.g.: if x=16, y=8, z=4 then
    num_elements = 16*8*4 = 512. Then if we set num_elements_per_warp=128,
    we'll launch 512 (elem) / 128 (elem/warp) = 4 warps. Note that it's
    just a suggestion, and sometimes other adjustment heuristics will
    override the num_elements_per_warp.
    """
    # Ideally we want to read this from some device config

    # for a 2d size_hints [a, b], a should be mapped to YBLOCK rather than XBLOCK
    size_hints = list(reversed(size_hints))

    # Per-axis grid-dimension caps; presumably the CUDA hardware maximums
    # (2**31 - 1 for x, 65535 for y/z) -- TODO confirm against device config.
    maxGridSize = [2147483647, 65535, 65535]

    target = conditional_product(x, y, z)
    if conditional_product(*size_hints) < target:
        # Requested block covers more elements than the whole problem;
        # aim for a smaller total block size.
        target //= 8

    # shrink sizes to size hints
    x = min(x, size_hints[0])
    if y:
        y = min(y, size_hints[1])
    if z:
        z = min(z, size_hints[2])

    # if we are below original block size, scale up where we can;
    # or if the calculated grid size is larger than the limit, we bump up the corresponding dimension
    while x < min(size_hints[0], config.triton.max_block["X"]) and (
        x * maxGridSize[0] < size_hints[0] or conditional_product(x, y, z) < target
    ):
        x *= 2
    while (
        y
        and y < min(size_hints[1], config.triton.max_block["Y"])
        and (
            y * maxGridSize[1] < size_hints[1] or conditional_product(x, y, z) < target
        )
    ):
        y *= 2
    while (
        z
        and z < min(size_hints[2], config.triton.max_block["Z"])
        and (
            z * maxGridSize[2] < size_hints[2] or conditional_product(x, y, z) < target
        )
    ):
        z *= 2

    cfg = {"XBLOCK": x}
    if y:
        cfg["YBLOCK"] = y
    if z:
        cfg["ZBLOCK"] = z
    # Warp count derived from elements-per-block / num_elements_per_warp,
    # clamped to [1, 8] and rounded up to a power of 2.
    num_warps = next_power_of_2(
        min(max(conditional_product(x, y, z) // num_elements_per_warp, 1), 8)
    )
    # we are going to arrive at 2 warps only if bs was too small due to
    # numel being too small. However to workaround some ptx bugs we still
    # want at least 4 warps if there's enough elements per thread
    # given that this is a rare situation, don't expect this to affect perf
    # in general
    # see https://github.com/pytorch/pytorch/pull/97950
    num_warps = max(num_warps, 4) if conditional_product(x, y, z) >= 128 else num_warps
    xnumel = size_hints[0]
    ynumel = size_hints[1] if y else None
    znumel = size_hints[2] if z else None
    check_config(cfg, xnumel=xnumel, ynumel=ynumel, znumel=znumel)
    return Config(cfg, num_warps=num_warps, num_stages=num_stages)
|
| 745 |
+
|
| 746 |
+
|
| 747 |
+
def triton_config_reduction(size_hints, x, r, num_stages=1, num_warps=None) -> Config:
    """
    Construct a reduction triton config with some adjustment heuristics
    based on size_hints. Size_hints is a tuple of numels in each tile
    dimension and will be rounded up to the nearest power of 2.

    ``x`` is the starting XBLOCK (outer dimension) and ``r`` the starting
    RBLOCK (reduction dimension); both may be scaled below.
    """

    target = conditional_product(x, r)
    if conditional_product(*size_hints) < target:
        # Requested block exceeds the whole problem; aim smaller.
        target //= 8

    # shrink sizes to size hints
    x = min(x, size_hints[0])
    r = min(r, size_hints[1])

    # if we are below original block size, scale up where we can
    while x < size_hints[0] and conditional_product(x, r) < target:
        x *= 2
    while r < size_hints[1] and conditional_product(x, r) < target:
        r *= 2

    cfg = {"XBLOCK": x, "RBLOCK": r}
    if num_warps is None:
        # Default: roughly one warp per 128 elements in the block.
        num_warps = conditional_product(x, r) // 128
    # NOTE: an explicitly passed num_warps is also clamped to [2, 8] and
    # rounded up to a power of 2 here.
    num_warps = next_power_of_2(min(max(num_warps, 2), 8))
    check_config(cfg, xnumel=size_hints[0])
    return Config(cfg, num_warps=num_warps, num_stages=num_stages)
|
| 774 |
+
|
| 775 |
+
|
| 776 |
+
def triton_config_tiled_reduction(size_hints, x, y, r, num_stages=1):
    """
    Construct a tile reduction triton config with some adjustment
    heuristics based on size_hints. Size_hints is a tuple of numels in
    each tile dimension and will be rounded up to the nearest power of 2.
    """

    target = conditional_product(x, y, r)
    if conditional_product(*size_hints) < target:
        # Requested block exceeds the whole problem; aim smaller.
        target //= 8

    # shrink sizes to size hints
    x = min(x, size_hints[0])
    y = min(y, size_hints[1])
    r = min(r, size_hints[2])

    # if we are below original block size, scale up where we can
    # (note: the reduction dim r is grown before the y tile dim)
    while x < size_hints[0] and conditional_product(x, y, r) < target:
        x *= 2
    while r < size_hints[2] and conditional_product(x, y, r) < target:
        r *= 2
    while y < size_hints[1] and conditional_product(x, y, r) < target:
        y *= 2

    cfg = {"XBLOCK": x, "YBLOCK": y, "RBLOCK": r}
    # Roughly one warp per 256 block elements, clamped to [1, 8], power of 2.
    num_warps = next_power_of_2(min(max(conditional_product(x, y, r) // 256, 1), 8))
    check_config(cfg, xnumel=size_hints[0], ynumel=size_hints[1])
    return Config(cfg, num_warps=num_warps, num_stages=num_stages)
|
| 804 |
+
|
| 805 |
+
|
| 806 |
+
def pointwise(size_hints, meta, tile_hint=None, filename=None):
    """
    Construct @triton.heuristics() based on size_hints.

    Dispatches on len(size_hints) (1D / 2D / 3D pointwise kernels). When
    autotuning is disabled (and max_autotune is off) a single default
    config is used; otherwise a small set of candidate configs plus any
    hint-derived configs is autotuned.
    """
    numel = functools.reduce(operator.mul, size_hints)
    # Baseline block size: numel/128, clamped to [256, 1024].
    bs = max(256, min(numel // 128, 1024))

    hinted_configs = autotune_hints_to_configs(
        meta.get("autotune_hints", set()), size_hints, bs
    )

    if len(size_hints) == 1:
        if disable_pointwise_autotuning() and not (
            config.max_autotune or config.max_autotune_pointwise
        ):
            return cached_autotune(
                size_hints,
                [triton_config(size_hints, bs)],
                meta=meta,
                heuristic_type=HeuristicType.POINTWISE,
                filename=filename,
            )
        else:
            return cached_autotune(
                size_hints,
                [
                    triton_config(size_hints, bs, num_elements_per_warp=256),
                    triton_config(size_hints, bs // 2, num_elements_per_warp=64),
                    *hinted_configs,
                ],
                meta=meta,
                heuristic_type=HeuristicType.POINTWISE,
                filename=filename,
            )
    if len(size_hints) == 2:
        # A SQUARE tile hint also short-circuits to the single 32x32 config.
        if (disable_pointwise_autotuning() or tile_hint == TileHint.SQUARE) and not (
            config.max_autotune or config.max_autotune_pointwise
        ):
            return cached_autotune(
                size_hints,
                [triton_config(size_hints, 32, 32)],
                meta=meta,
                heuristic_type=HeuristicType.POINTWISE,
                filename=filename,
            )
        return cached_autotune(
            size_hints,
            [
                triton_config(size_hints, 32, 32),
                triton_config(size_hints, 64, 64),  # ~8% better for fp16
                triton_config(size_hints, 256, 16),
                triton_config(size_hints, 16, 256),
                triton_config(size_hints, bs, 1),
                triton_config(size_hints, 1, bs),
                *hinted_configs,
            ],
            meta=meta,
            filename=filename,
            heuristic_type=HeuristicType.POINTWISE,
        )
    if len(size_hints) == 3:
        if disable_pointwise_autotuning():
            return cached_autotune(
                size_hints,
                [triton_config(size_hints, 16, 16, 16)],
                meta=meta,
                heuristic_type=HeuristicType.POINTWISE,
                filename=filename,
            )
        return cached_autotune(
            size_hints,
            [
                triton_config(size_hints, 16, 16, 16),
                triton_config(size_hints, 64, 8, 8),
                triton_config(size_hints, 8, 64, 8),
                triton_config(size_hints, 8, 8, 64),
                triton_config(size_hints, bs, 1, 1),
                triton_config(size_hints, 1, bs, 1),
                triton_config(size_hints, 1, 1, bs),
                *hinted_configs,
            ],
            meta=meta,
            filename=filename,
            heuristic_type=HeuristicType.POINTWISE,
        )
    raise NotImplementedError(f"size_hints: {size_hints}")
|
| 892 |
+
|
| 893 |
+
|
| 894 |
+
def reduction(size_hints, reduction_hint=False, meta=None, filename=None):
    """args to @triton.heuristics()

    Builds the autotuning wrapper for a (non-persistent) reduction kernel.
    Only 2D size_hints (xnumel, rnumel) are supported; a specific
    ReductionHint short-circuits to a single matching config unless
    max_autotune is enabled.
    """
    assert meta is not None
    rnumel = size_hints[-1]
    if len(size_hints) == 2:
        # XBLOCK=1; RBLOCK follows rnumel in [256, 2048), else 2048.
        contiguous_config = triton_config_reduction(
            size_hints, 1, (rnumel if 256 <= rnumel < 2048 else 2048)
        )
        # Wide outer dim, small reduction block.
        outer_config = triton_config_reduction(size_hints, 128, 8)
        # Small reductions: pack multiple rows per block (XBLOCK scaled by
        # 256/rnumel), reduce the whole row at once.
        tiny_config = triton_config_reduction(
            size_hints, 2 * (256 // rnumel) if rnumel <= 256 else 1, min(rnumel, 2048)
        )
        if config.max_autotune or config.max_autotune_pointwise:
            pass  # skip all these cases
        elif reduction_hint == ReductionHint.INNER:
            return cached_autotune(
                size_hints,
                [contiguous_config],
                meta=meta,
                heuristic_type=HeuristicType.REDUCTION,
                filename=filename,
            )
        elif reduction_hint == ReductionHint.OUTER:
            return cached_autotune(
                size_hints,
                [outer_config],
                meta=meta,
                heuristic_type=HeuristicType.REDUCTION,
                filename=filename,
            )
        elif reduction_hint == ReductionHint.OUTER_TINY:
            return cached_autotune(
                size_hints,
                [tiny_config],
                meta=meta,
                heuristic_type=HeuristicType.REDUCTION,
                filename=filename,
            )
        if disable_pointwise_autotuning():
            return cached_autotune(
                size_hints,
                [triton_config_reduction(size_hints, 32, 128)],
                meta=meta,
                heuristic_type=HeuristicType.REDUCTION,
                filename=filename,
            )
        return cached_autotune(
            size_hints,
            [
                contiguous_config,
                outer_config,
                tiny_config,
                triton_config_reduction(size_hints, 64, 64),
                triton_config_reduction(size_hints, 8, 512),
                # halve the XBLOCK/RBLOCK compared to outer_config
                # TODO: this may only be beneficial when each iteration of the reduciton
                # is quite heavy. E.g. https://gist.github.com/shunting314/189a8ef69f90db9d614a823385147a72
                triton_config_reduction(size_hints, 64, 4, num_warps=8),
            ],
            meta=meta,
            filename=filename,
            heuristic_type=HeuristicType.REDUCTION,
        )
    raise NotImplementedError(f"size_hints: {size_hints}")
|
| 958 |
+
|
| 959 |
+
|
| 960 |
+
def persistent_reduction(size_hints, reduction_hint=False, meta=None, filename=None):
    """Build the autotuning wrapper for a persistent reduction kernel.

    A persistent reduction handles the entire reduction dimension in one
    block, so candidate configs vary only in XBLOCK and RBLOCK is dropped
    from each config before use.
    """
    xnumel, rnumel = size_hints

    # Candidate XBLOCKs, capped so a block holds at most 4096 elements and
    # XBLOCK never exceeds xnumel.
    configs = [
        triton_config_reduction(size_hints, xblock, rnumel)
        for xblock in (1, 8, 32, 128)
        if rnumel * xblock <= 4096 and xblock <= xnumel
    ]

    # TODO(jansel): we should be able to improve these heuristics
    if reduction_hint == ReductionHint.INNER and rnumel >= 256:
        configs = configs[:1]
    elif reduction_hint == ReductionHint.OUTER:
        configs = configs[-1:]
    elif reduction_hint == ReductionHint.OUTER_TINY:
        configs = [
            triton_config_reduction(
                size_hints, 2 * (256 // rnumel) if rnumel <= 256 else 1, rnumel
            )
        ]
    for c in configs:
        # we don't need RBLOCK for persistent reduction
        c.kwargs.pop("RBLOCK")

    if disable_pointwise_autotuning():
        configs = configs[:1]

    return cached_autotune(
        size_hints,
        configs,
        meta=meta,
        filename=filename,
        heuristic_type=HeuristicType.PERSISTENT_REDUCTION,
    )
|
| 994 |
+
|
| 995 |
+
|
| 996 |
+
def template(num_stages, num_warps, meta, filename=None):
    """Build the @triton.heuristics() wrapper for a triton template kernel.

    A template carries exactly one candidate config (no autotuning
    search) and is cached under HeuristicType.TEMPLATE.
    """
    single_config = triton.Config({}, num_stages=num_stages, num_warps=num_warps)
    return cached_autotune(
        None,
        [single_config],
        meta=meta,
        heuristic_type=HeuristicType.TEMPLATE,
        filename=filename,
    )
|
| 1007 |
+
|
| 1008 |
+
|
| 1009 |
+
def foreach(meta, num_warps, filename=None):
    """Build the @triton.heuristics() wrapper for a triton foreach kernel.

    Uses a single fixed config (num_stages=1) with the caller-chosen warp
    count; cached under HeuristicType.TEMPLATE like other no-search kernels.
    """
    fixed_config = triton.Config({}, num_stages=1, num_warps=num_warps)
    return cached_autotune(
        None,
        [fixed_config],
        meta=meta,
        heuristic_type=HeuristicType.TEMPLATE,
        filename=filename,
    )
|
| 1020 |
+
|
| 1021 |
+
|
| 1022 |
+
def grid(*numels):
    """Return a callable computing the triton launch grid from a config meta.

    ``numels`` are given outermost-first (..., Z, Y, X); they are reversed
    internally so position 0 is the X axis. Axes that were not supplied
    launch with extent 1.
    """
    if not 1 <= len(numels) <= 3:
        raise AssertionError(f"invalid size for numels {len(numels)}")
    # Reverse so indices 0/1/2 correspond to X/Y/Z, padding absent axes
    # with None.
    padded = list(reversed(numels)) + [None] * (3 - len(numels))
    xnumel, ynumel, znumel = padded

    def dim(numel, block):
        # An absent axis contributes a single grid slot.
        return 1 if numel is None else ceildiv(numel, block)

    def grid_fn(meta):
        return (
            dim(xnumel, meta.get("XBLOCK", 1)),
            dim(ynumel, meta.get("YBLOCK", None)),
            dim(znumel, meta.get("ZBLOCK", None)),
        )

    return grid_fn
|
llava_next/lib/python3.10/site-packages/torch/_inductor/virtualized.py
ADDED
|
@@ -0,0 +1,310 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import itertools
|
| 2 |
+
from contextlib import contextmanager
|
| 3 |
+
from itertools import chain
|
| 4 |
+
from threading import local
|
| 5 |
+
from typing import Any
|
| 6 |
+
from unittest.mock import patch
|
| 7 |
+
|
| 8 |
+
import sympy
|
| 9 |
+
|
| 10 |
+
from torch._inductor.utils import IndentedBuffer
|
| 11 |
+
|
| 12 |
+
from torch.fx.graph import inplace_methods, magic_methods
|
| 13 |
+
|
| 14 |
+
from .utils import reduction_num_outputs, sympy_str, sympy_symbol
|
| 15 |
+
|
| 16 |
+
threadlocal = local()
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
class Virtualized:
    """
    A global variable that redirects via thread local variable

    This allows us to swap in different op implementations in codegen.
    """

    def __init__(self, vname, default):
        # Attribute name under which the handler is stored on the
        # module-level thread-local object.
        self._key = f"__torchinductor_{vname}"
        # Zero-arg callable producing the fallback handler (typically a
        # handler class, instantiated on demand in _get_handler).
        self._default = default

    def _set_handler(self, value):
        # Snapshot the handler active before this call so the returned
        # context manager can restore it on exit.
        prior = self._get_handler()
        setattr(threadlocal, self._key, value)

        @contextmanager
        def ctx():
            try:
                yield
            finally:
                # Restore the previous handler (recursively yields another
                # context manager, which is intentionally discarded here).
                self._set_handler(prior)

        return ctx()

    def _get_handler(self):
        # Fall back to a freshly constructed default when nothing has been
        # installed on this thread yet.
        try:
            return getattr(threadlocal, self._key)
        except AttributeError:
            return self._default()

    def __getattr__(self, name):
        # Proxy all attribute access through to the currently active handler.
        return getattr(self._get_handler(), name)
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
class NullHandler:
    """Placeholder handler used as the default for Virtualized slots that
    have no real handler installed; defines no behavior of its own."""
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
def _arg_str(a):
    """Render an op argument for display: sympy expressions go through
    sympy_str, everything else through plain str()."""
    return sympy_str(a) if isinstance(a, sympy.Expr) else str(a)
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
class MockHandler:
    """Ops handler that renders each op call as a string such as
    ``ops.add(a, b)`` instead of executing it; used to print IR."""

    def __getattr__(self, name):
        if name == "name":
            return "MockHandler"

        # Any unknown op becomes a formatter producing "ops.<name>(...)".
        def inner(*args, **kwargs):
            fargs = [_arg_str(a) for a in args]
            fargs.extend(f"{k}={v}" for k, v in kwargs.items())
            return f"ops.{name}({', '.join(fargs)})"

        return inner

    @staticmethod
    def masked(mask, body, other):
        # `body` is a zero-arg callable; it is invoked here so the masked
        # subexpression is rendered inline.
        return f"ops.masked({mask}, {body()}, {other})"

    @staticmethod
    def indirect_indexing(index_var, size, check=True):
        # Returns a sympy symbol wrapping the index expression rather than
        # a formatted string, since indexing results must stay symbolic.
        return sympy_symbol(f"({str(index_var)})")

    @classmethod
    def _init_cls(cls):
        # Install a formatter for every magic/inplace method name known to
        # torch.fx (format strings like "{} + {}").
        def make_handler(format_string):
            @staticmethod  # type: ignore[misc]
            def inner(*args):
                return format_string.format(*args)

            return inner

        for name, format_string in chain(
            magic_methods.items(), inplace_methods.items()
        ):
            setattr(cls, name, make_handler(format_string))
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
class KernelFormatterHandler:
    """Ops handler that delegates each op to ``parent_handler`` and records
    the result into numbered temporaries (tmp0, tmp1, ...), building a
    printable function body for debugging/logging."""

    def __init__(self, parent_handler):
        self.parent_handler = parent_handler
        self.output = IndentedBuffer(1)
        # Source of sequential temporary-variable ids.
        self.var_counter = itertools.count()

    @staticmethod
    def ir_to_string(ir_fn, index, rindex=None):
        """Render ``ir_fn`` (an inner_fn taking index vars) as python-like
        source text. ``rindex`` is included only for reductions."""
        from .ir import FlexibleLayout

        args = [index, rindex] if rindex is not None else [index]
        names = ["index", "rindex"] if rindex is not None else ["index"]
        formatter = KernelFormatterHandler(MockHandler())

        with formatter.output.indent(-1):
            # Emit the def line one indent level to the left of the body.
            formatter.output.writeline(f"def inner_fn({', '.join(names)}):")
        for name, arg in zip(names, args):
            if arg:
                # Unpack each index tuple; constant entries become "_".
                lhs = ", ".join(
                    [
                        str("_" if isinstance(v, (int, sympy.Integer)) else v)
                        for v in arg
                    ]
                )
                formatter.output.writeline(f"{lhs} = {name}")

        # Run ir_fn with this formatter installed as the active ops handler
        # so every op call is recorded instead of executed.
        with V.set_ops_handler(formatter), patch.object(  # type: ignore[call-arg]
            FlexibleLayout, "allow_indexing", True
        ):
            result = ir_fn(*args)
            return formatter.getvalue(result)

    def __getattr__(self, name):
        def inner(*args, **kwargs):
            line = getattr(self.parent_handler, name)(*args, **kwargs)
            if name == "indirect_indexing":
                # Must stay a symbolic expression, not a tmp variable.
                return line
            # replace line with a new variable name
            varname = f"tmp{next(self.var_counter)}"
            self.output.writeline(f"{varname} = {line}")
            return varname

        return inner

    def reduction(self, dtype, src_dtype, reduction_type, value):
        # Reductions may produce multiple outputs (e.g. argmax -> value, index),
        # so allocate one tmp per output.
        line = self.parent_handler.reduction(dtype, src_dtype, reduction_type, value)
        num_values = reduction_num_outputs(reduction_type)
        varnames = [f"tmp{next(self.var_counter)}" for _ in range(num_values)]
        self.output.writeline(f"{','.join(varnames)} = {line}")
        return tuple(varnames) if num_values > 1 else varnames[0]

    def getvalue(self, result):
        # Finish the rendered body with a return of the final expression.
        self.output.writeline(f"return {result}")
        return self.output.getvalue()
|
| 152 |
+
|
| 153 |
+
|
| 154 |
+
class WrapperHandler:
    """Forward every attribute access to a wrapped inner handler."""

    def __init__(self, inner):
        self._inner = inner

    def __getattr__(self, item):
        # __getattr__ only fires for names missing on this wrapper, so
        # subclasses can override individual ops while everything else
        # falls through to the wrapped handler.
        inner_handler = self._inner
        return getattr(inner_handler, item)
|
| 160 |
+
|
| 161 |
+
|
| 162 |
+
# Populate MockHandler with formatters for all torch.fx magic/inplace methods.
MockHandler._init_cls()

# Thread-local virtualized globals; each is installed/restored via the
# corresponding setter exposed on V below.
_ops = Virtualized("ops", MockHandler)
_graph = Virtualized("graph", NullHandler)
_real_inputs = Virtualized("real_inputs", NullHandler)
_fake_mode = Virtualized("fake_mode", NullHandler)
_kernel = Virtualized("kernel", NullHandler)
_debug = Virtualized("debug", NullHandler)
_interpreter = Virtualized("interpreter", NullHandler)
|
| 171 |
+
|
| 172 |
+
|
| 173 |
+
class OpsValue:
    """The return type of most ops calls.

    This exists so we can overload magic methods, and write mathematical
    expressions much more fluently. So instead of

        ops.add(ops.mul(ops.mul(ops.sub(ops.mul(_Ap2, x), _Ap3), x), x), _1)

    we can write

        (_Ap2 * x - _Ap3) * x * x + _1

    """

    # The wrapped raw IR value returned by the underlying ops handler.
    value: Any

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return str(self.value)

    def __repr__(self):
        return f"OpsValue({self.value!r})"

    # Each operator delegates to the module-level `ops` wrapper, which
    # unwraps OpsValue arguments and re-wraps the handler's result.
    def __add__(self, other):
        return ops.add(self, other)

    def __mul__(self, other):
        return ops.mul(self, other)

    def __sub__(self, other):
        return ops.sub(self, other)

    def __neg__(self):
        return ops.neg(self)

    def __truediv__(self, other):
        return ops.truediv(self, other)

    def __floordiv__(self, other):
        return ops.floordiv(self, other)

    def __mod__(self, other):
        return ops.mod(self, other)

    def __pow__(self, other):
        return ops.pow(self, other)
|
| 221 |
+
|
| 222 |
+
|
| 223 |
+
class OpsWrapper:
    """This wraps any returned IR values into an `OpsValue` instance, so that we
    can overload the magic methods for writing mathematical expressions fluently.
    """

    def __getattr__(self, name):
        # Generic op dispatch: unwrap OpsValue args, call the active ops
        # handler, and wrap the result(s) back into OpsValue.
        def inner(*args, **kwargs):
            new_args = [OpsWrapper._unwrap(a) for a in args]
            new_kwargs = {k: OpsWrapper._unwrap(v) for k, v in kwargs.items()}
            return OpsWrapper._wrap(getattr(_ops, name)(*new_args, **new_kwargs))

        return inner

    @staticmethod
    def _unwrap(x):
        # Recursively strip OpsValue wrappers (lists/tuples become tuples).
        if isinstance(x, (list, tuple)):
            return tuple(OpsWrapper._unwrap(v) for v in x)
        if isinstance(x, OpsValue):
            return x.value
        return x

    @staticmethod
    def _wrap(x):
        # Wrap a handler result (or each element of a sequence) in OpsValue.
        if isinstance(x, (list, tuple)):
            return tuple(OpsValue(v) for v in x)
        return OpsValue(x)

    @staticmethod
    def indirect_indexing(index, size, check=True):
        # Returns a sympy value, not IR value
        index = OpsWrapper._unwrap(index)
        return _ops.indirect_indexing(index, size, check)


# Singleton used throughout codegen; `ops.foo(...)` dispatches to the
# currently installed ops handler via _ops.
ops = OpsWrapper()
|
| 258 |
+
|
| 259 |
+
|
| 260 |
+
class _V:
    """Namespace object exposing all virtualized handlers and their setters;
    accessed through the module-level singleton ``V``."""

    MockHandler = MockHandler
    KernelFormatterHandler = KernelFormatterHandler
    WrapperHandler = WrapperHandler

    # Setter methods return a context manager restoring the prior handler
    # (see Virtualized._set_handler).
    set_ops_handler = _ops._set_handler
    get_ops_handler = _ops._get_handler
    set_graph_handler = _graph._set_handler
    set_real_inputs = _real_inputs._set_handler
    get_real_inputs = _real_inputs._get_handler
    set_fake_mode = _fake_mode._set_handler
    get_fake_mode = _fake_mode._get_handler
    set_kernel_handler = _kernel._set_handler
    set_debug_handler = _debug._set_handler
    set_interpreter_handler = _interpreter._set_handler

    @property
    def ops(self) -> MockHandler:  # type: ignore[valid-type]
        """The operator handler specific to the current codegen task"""
        return _ops._get_handler()

    @property
    def graph(self):
        """The graph currently being generated"""
        return _graph._get_handler()

    @property
    def real_inputs(self):
        """non-fake example inputs"""
        return _real_inputs._get_handler()

    @property
    def fake_mode(self):
        """The fake mode handler installed via set_fake_mode"""
        return _fake_mode._get_handler()

    @property
    def kernel(self):
        """The kernel currently being generated"""
        return _kernel._get_handler()

    @property
    def debug(self):
        return _debug._get_handler()

    @property
    def interpreter(self):
        return _interpreter._get_handler()


# Module-level singleton through which all virtualized state is accessed.
V = _V()
|
vlmpy310/lib/python3.10/site-packages/pyglet/gl/__pycache__/gl.cpython-310.pyc
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:1a88fc0af7a9eb460244b9ab24220a69f994edaa733ca8d75f6a023b41f3201c
|
| 3 |
+
size 118683
|
vlmpy310/lib/python3.10/site-packages/skimage/future/__init__.py
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Functionality with an experimental API.
|
| 2 |
+
|
| 3 |
+
.. warning::
|
| 4 |
+
Although you can count on the functions in this package being
|
| 5 |
+
around in the future, the API may change with any version update
|
| 6 |
+
**and will not follow the skimage two-version deprecation path**.
|
| 7 |
+
Therefore, use the functions herein with care, and do not use them
|
| 8 |
+
in production code that will depend on updated skimage versions.
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
import lazy_loader as _lazy
|
| 12 |
+
|
| 13 |
+
__getattr__, __dir__, __all__ = _lazy.attach_stub(__name__, __file__)
|
vlmpy310/lib/python3.10/site-packages/skimage/future/manual_segmentation.py
ADDED
|
@@ -0,0 +1,235 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from functools import reduce
|
| 2 |
+
import numpy as np
|
| 3 |
+
from ..draw import polygon
|
| 4 |
+
from .._shared.version_requirements import require
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
# Matplotlib mouse-button event codes used by the manual segmentation tools.
LEFT_CLICK = 1
RIGHT_CLICK = 3
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def _mask_from_vertices(vertices, shape, label):
    """Rasterize a polygon given as (x, y) vertices into an int mask of
    ``shape`` filled with ``label`` inside the polygon and 0 elsewhere."""
    out = np.zeros(shape, dtype=int)
    # Vertices arrive as (x, y); skimage.draw.polygon expects (row, col).
    rows = [xy[1] for xy in vertices]
    cols = [xy[0] for xy in vertices]
    rr, cc = polygon(rows, cols, shape)
    out[rr, cc] = label
    return out
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
@require("matplotlib", ">=3.3")
|
| 21 |
+
def _draw_polygon(ax, vertices, alpha=0.4):
|
| 22 |
+
from matplotlib.patches import Polygon
|
| 23 |
+
from matplotlib.collections import PatchCollection
|
| 24 |
+
import matplotlib.pyplot as plt
|
| 25 |
+
|
| 26 |
+
polygon = Polygon(vertices, closed=True)
|
| 27 |
+
p = PatchCollection([polygon], match_original=True, alpha=alpha)
|
| 28 |
+
polygon_object = ax.add_collection(p)
|
| 29 |
+
plt.draw()
|
| 30 |
+
return polygon_object
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
@require("matplotlib", ">=3.3")
def manual_polygon_segmentation(image, alpha=0.4, return_all=False):
    """Return a label image based on polygon selections made with the mouse.

    Parameters
    ----------
    image : (M, N[, 3]) array
        Grayscale or RGB image.

    alpha : float, optional
        Transparency value for polygons drawn over the image.

    return_all : bool, optional
        If True, an array containing each separate polygon drawn is returned.
        (The polygons may overlap.) If False (default), latter polygons
        "overwrite" earlier ones where they overlap.

    Returns
    -------
    labels : array of int, shape ([Q, ]M, N)
        The segmented regions. If mode is `'separate'`, the leading dimension
        of the array corresponds to the number of regions that the user drew.

    Notes
    -----
    Use left click to select the vertices of the polygon
    and right click to confirm the selection once all vertices are selected.

    Examples
    --------
    >>> from skimage import data, future
    >>> import matplotlib.pyplot as plt # doctest: +SKIP
    >>> camera = data.camera()
    >>> mask = future.manual_polygon_segmentation(camera) # doctest: +SKIP
    >>> fig, ax = plt.subplots() # doctest: +SKIP
    >>> ax.imshow(mask) # doctest: +SKIP
    >>> plt.show() # doctest: +SKIP
    """
    import matplotlib
    import matplotlib.pyplot as plt

    # Confirmed polygons: parallel lists of vertex lists and their artists,
    # mutated by the closures below (so `_undo` can drop the last one).
    list_of_vertex_lists = []
    polygons_drawn = []

    # In-progress polygon: vertices collected so far and its preview artist.
    temp_list = []
    preview_polygon_drawn = []

    if image.ndim not in (2, 3):
        raise ValueError('Only 2D grayscale or RGB images are supported.')

    fig, ax = plt.subplots()
    fig.subplots_adjust(bottom=0.2)
    ax.imshow(image, cmap="gray")
    ax.set_axis_off()

    def _undo(*args, **kwargs):
        # Drop the most recently confirmed polygon, if any.
        if list_of_vertex_lists:
            list_of_vertex_lists.pop()
            # Remove last polygon from list of polygons...
            last_poly = polygons_drawn.pop()
            # ... then from the plot
            last_poly.remove()
            fig.canvas.draw_idle()

    undo_pos = fig.add_axes([0.85, 0.05, 0.075, 0.075])
    undo_button = matplotlib.widgets.Button(undo_pos, '\u27f2')
    undo_button.on_clicked(_undo)

    def _extend_polygon(event):
        # Do not record click events outside axis or in undo button
        if event.inaxes is None or event.inaxes is undo_pos:
            return
        # Do not record click events when toolbar is active
        if ax.get_navigate_mode():
            return

        if event.button == LEFT_CLICK:  # Select vertex
            temp_list.append([event.xdata, event.ydata])
            # Remove previously drawn preview polygon if any.
            if preview_polygon_drawn:
                poly = preview_polygon_drawn.pop()
                poly.remove()

            # Preview polygon with selected vertices.
            # Slightly more transparent than a confirmed polygon.
            polygon = _draw_polygon(ax, temp_list, alpha=(alpha / 1.4))
            preview_polygon_drawn.append(polygon)

        elif event.button == RIGHT_CLICK:  # Confirm the selection
            if not temp_list:
                return

            # Store the vertices of the polygon as shown in preview.
            # Redraw polygon and store it in polygons_drawn so that
            # `_undo` works correctly.
            list_of_vertex_lists.append(temp_list[:])
            polygon_object = _draw_polygon(ax, temp_list, alpha=alpha)
            polygons_drawn.append(polygon_object)

            # Empty the temporary variables.
            preview_poly = preview_polygon_drawn.pop()
            preview_poly.remove()
            del temp_list[:]

            plt.draw()

    fig.canvas.mpl_connect('button_press_event', _extend_polygon)

    # Blocks until the user closes the figure; the lists above are then final.
    plt.show(block=True)

    # Lazily rasterize each polygon; label i is the draw order (1-based).
    labels = (
        _mask_from_vertices(vertices, image.shape[:2], i)
        for i, vertices in enumerate(list_of_vertex_lists, start=1)
    )
    if return_all:
        # NOTE(review): np.stack raises if no polygon was drawn — presumably
        # acceptable for an interactive tool; confirm desired behavior.
        return np.stack(labels)
    else:
        # Later polygons win where they overlap (maximum of labels).
        return reduce(np.maximum, labels, np.broadcast_to(0, image.shape[:2]))
|
| 150 |
+
|
| 151 |
+
|
| 152 |
+
@require("matplotlib", ">=3.3")
def manual_lasso_segmentation(image, alpha=0.4, return_all=False):
    """Return a label image based on freeform selections made with the mouse.

    Parameters
    ----------
    image : (M, N[, 3]) array
        Grayscale or RGB image.

    alpha : float, optional
        Transparency value for polygons drawn over the image.

    return_all : bool, optional
        If True, an array containing each separate polygon drawn is returned.
        (The polygons may overlap.) If False (default), latter polygons
        "overwrite" earlier ones where they overlap.

    Returns
    -------
    labels : array of int, shape ([Q, ]M, N)
        The segmented regions. If mode is `'separate'`, the leading dimension
        of the array corresponds to the number of regions that the user drew.

    Notes
    -----
    Press and hold the left mouse button to draw around each object.

    Examples
    --------
    >>> from skimage import data, future
    >>> import matplotlib.pyplot as plt # doctest: +SKIP
    >>> camera = data.camera()
    >>> mask = future.manual_lasso_segmentation(camera) # doctest: +SKIP
    >>> fig, ax = plt.subplots() # doctest: +SKIP
    >>> ax.imshow(mask) # doctest: +SKIP
    >>> plt.show() # doctest: +SKIP
    """
    import matplotlib
    import matplotlib.pyplot as plt

    # Confirmed selections and their plot artists, mutated by the closures.
    list_of_vertex_lists = []
    polygons_drawn = []

    if image.ndim not in (2, 3):
        raise ValueError('Only 2D grayscale or RGB images are supported.')

    fig, ax = plt.subplots()
    fig.subplots_adjust(bottom=0.2)
    ax.imshow(image, cmap="gray")
    ax.set_axis_off()

    def _undo(*args, **kwargs):
        # Drop the most recently drawn selection, if any.
        if list_of_vertex_lists:
            list_of_vertex_lists.pop()
            # Remove last polygon from list of polygons...
            last_poly = polygons_drawn.pop()
            # ... then from the plot
            last_poly.remove()
            fig.canvas.draw_idle()

    undo_pos = fig.add_axes([0.85, 0.05, 0.075, 0.075])
    undo_button = matplotlib.widgets.Button(undo_pos, '\u27f2')
    undo_button.on_clicked(_undo)

    def _on_lasso_selection(vertices):
        # Ignore degenerate selections that cannot form a polygon.
        if len(vertices) < 3:
            return
        list_of_vertex_lists.append(vertices)
        polygon_object = _draw_polygon(ax, vertices, alpha=alpha)
        polygons_drawn.append(polygon_object)
        plt.draw()

    # BUG FIX: keep a reference to the selector. Matplotlib widgets must be
    # kept alive by the caller; a discarded LassoSelector may be
    # garbage-collected and stop responding to mouse events.
    lasso = matplotlib.widgets.LassoSelector(ax, _on_lasso_selection)

    # Blocks until the figure is closed; `lasso` stays referenced meanwhile.
    plt.show(block=True)

    # Lazily rasterize each selection; label i is the draw order (1-based).
    labels = (
        _mask_from_vertices(vertices, image.shape[:2], i)
        for i, vertices in enumerate(list_of_vertex_lists, start=1)
    )
    if return_all:
        return np.stack(labels)
    else:
        # Later selections win where they overlap (maximum of labels).
        return reduce(np.maximum, labels, np.broadcast_to(0, image.shape[:2]))
|
vlmpy310/lib/python3.10/site-packages/skimage/future/trainable_segmentation.py
ADDED
|
@@ -0,0 +1,164 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from skimage.feature import multiscale_basic_features
|
| 2 |
+
|
| 3 |
+
# Optional dependency: scikit-learn provides the default classifier and the
# NotFittedError exception type. When absent, define a stand-in exception so
# the rest of the module can reference NotFittedError unconditionally.
try:
    from sklearn.exceptions import NotFittedError
    from sklearn.ensemble import RandomForestClassifier

    has_sklearn = True
except ImportError:
    has_sklearn = False

    class NotFittedError(Exception):
        # Fallback used only when scikit-learn is not installed.
        pass
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class TrainableSegmenter:
    """Estimator for classifying pixels.

    Parameters
    ----------
    clf : classifier object, optional
        classifier object, exposing a ``fit`` and a ``predict`` method as in
        scikit-learn's API, for example an instance of
        ``RandomForestClassifier`` or ``LogisticRegression`` classifier.
    features_func : function, optional
        function computing features on all pixels of the image, to be passed
        to the classifier. The output should be of shape
        ``(m_features, *labels.shape)``. If None,
        :func:`skimage.feature.multiscale_basic_features` is used.

    Methods
    -------
    compute_features
    fit
    predict
    """

    def __init__(self, clf=None, features_func=None):
        if clf is None:
            if has_sklearn:
                self.clf = RandomForestClassifier(n_estimators=100, n_jobs=-1)
            else:
                # BUG FIX: the two implicitly-concatenated fragments lacked a
                # separating space, yielding "...instanceto TrainableSegmenter."
                raise ImportError(
                    "Please install scikit-learn or pass a classifier instance "
                    "to TrainableSegmenter."
                )
        else:
            self.clf = clf
        self.features_func = features_func

    def compute_features(self, image):
        # Lazily fall back to the default feature extractor the first time
        # features are computed; stores the result on the instance.
        if self.features_func is None:
            self.features_func = multiscale_basic_features
        self.features = self.features_func(image)

    def fit(self, image, labels):
        """Train classifier using partially labeled (annotated) image.

        Parameters
        ----------
        image : ndarray
            Input image, which can be grayscale or multichannel, and must have a
            number of dimensions compatible with ``self.features_func``.
        labels : ndarray of ints
            Labeled array of shape compatible with ``image`` (same shape for a
            single-channel image). Labels >= 1 correspond to the training set and
            label 0 to unlabeled pixels to be segmented.
        """
        self.compute_features(image)
        fit_segmenter(labels, self.features, self.clf)

    def predict(self, image):
        """Segment new image using trained internal classifier.

        Parameters
        ----------
        image : ndarray
            Input image, which can be grayscale or multichannel, and must have a
            number of dimensions compatible with ``self.features_func``.

        Raises
        ------
        NotFittedError if ``self.clf`` has not been fitted yet (use ``self.fit``).
        """
        if self.features_func is None:
            self.features_func = multiscale_basic_features
        features = self.features_func(image)
        return predict_segmenter(features, self.clf)
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
def fit_segmenter(labels, features, clf):
    """Segmentation using labeled parts of the image and a classifier.

    Parameters
    ----------
    labels : ndarray of ints
        Image of labels. Labels >= 1 correspond to the training set and
        label 0 to unlabeled pixels to be segmented.
    features : ndarray
        Array of features, with the first dimension corresponding to the number
        of features, and the other dimensions correspond to ``labels.shape``.
    clf : classifier object
        classifier object, exposing a ``fit`` and a ``predict`` method as in
        scikit-learn's API, for example an instance of
        ``RandomForestClassifier`` or ``LogisticRegression`` classifier.

    Returns
    -------
    clf : classifier object
        classifier trained on ``labels``

    Raises
    ------
    NotFittedError if ``self.clf`` has not been fitted yet (use ``self.fit``).
    """
    # Only annotated pixels (label >= 1) participate in training.
    annotated = labels > 0
    training_data = features[annotated]
    training_labels = labels[annotated].ravel()
    clf.fit(training_data, training_labels)
    return clf
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
def predict_segmenter(features, clf):
    """Segmentation of images using a pretrained classifier.

    Parameters
    ----------
    features : ndarray
        Array of features, with the last dimension corresponding to the number
        of features, and the other dimensions are compatible with the shape of
        the image to segment, or a flattened image.
    clf : classifier object
        trained classifier object, exposing a ``predict`` method as in
        scikit-learn's API, for example an instance of
        ``RandomForestClassifier`` or ``LogisticRegression`` classifier. The
        classifier must be already trained, for example with
        :func:`skimage.future.fit_segmenter`.

    Returns
    -------
    output : ndarray
        Labeled array, built from the prediction of the classifier.
    """
    sh = features.shape
    # Flatten any leading image dimensions: classifiers expect a 2D
    # (n_samples, n_features) array.
    if features.ndim > 2:
        features = features.reshape((-1, sh[-1]))

    try:
        predicted_labels = clf.predict(features)
    except NotFittedError:
        # BUG FIX: the two implicitly-concatenated fragments lacked a
        # separating space, yielding "...firstfor example...".
        raise NotFittedError(
            "You must train the classifier `clf` first "
            "for example with the `fit_segmenter` function."
        )
    except ValueError as err:
        # A feature-length mismatch usually means features were computed
        # differently at train and predict time; augment the message.
        if err.args and 'x must consist of vectors of length' in err.args[0]:
            raise ValueError(
                err.args[0]
                + '\n'
                + "Maybe you did not use the same type of features for training the classifier."
            ) from err
        else:
            # Preserve the original traceback for unrelated ValueErrors.
            raise
    # Restore the image shape (all but the trailing feature dimension).
    output = predicted_labels.reshape(sh[:-1])
    return output
|
vlmpy310/lib/python3.10/site-packages/skimage/segmentation/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (1.26 kB). View file
|
|
|
vlmpy310/lib/python3.10/site-packages/skimage/segmentation/__pycache__/_chan_vese.cpython-310.pyc
ADDED
|
Binary file (13.3 kB). View file
|
|
|
vlmpy310/lib/python3.10/site-packages/skimage/segmentation/__pycache__/_clear_border.cpython-310.pyc
ADDED
|
Binary file (3.76 kB). View file
|
|
|
vlmpy310/lib/python3.10/site-packages/skimage/segmentation/__pycache__/_expand_labels.cpython-310.pyc
ADDED
|
Binary file (4.28 kB). View file
|
|
|
vlmpy310/lib/python3.10/site-packages/skimage/segmentation/__pycache__/_felzenszwalb.cpython-310.pyc
ADDED
|
Binary file (2.71 kB). View file
|
|
|
vlmpy310/lib/python3.10/site-packages/skimage/segmentation/__pycache__/_join.cpython-310.pyc
ADDED
|
Binary file (6.04 kB). View file
|
|
|
vlmpy310/lib/python3.10/site-packages/skimage/segmentation/__pycache__/_quickshift.cpython-310.pyc
ADDED
|
Binary file (3.43 kB). View file
|
|
|
vlmpy310/lib/python3.10/site-packages/skimage/segmentation/__pycache__/_watershed.cpython-310.pyc
ADDED
|
Binary file (9.06 kB). View file
|
|
|
vlmpy310/lib/python3.10/site-packages/skimage/segmentation/__pycache__/active_contour_model.cpython-310.pyc
ADDED
|
Binary file (6.06 kB). View file
|
|
|
vlmpy310/lib/python3.10/site-packages/skimage/segmentation/__pycache__/boundaries.cpython-310.pyc
ADDED
|
Binary file (9.35 kB). View file
|
|
|
vlmpy310/lib/python3.10/site-packages/skimage/segmentation/__pycache__/morphsnakes.cpython-310.pyc
ADDED
|
Binary file (15 kB). View file
|
|
|
vlmpy310/lib/python3.10/site-packages/skimage/segmentation/__pycache__/random_walker_segmentation.cpython-310.pyc
ADDED
|
Binary file (17.1 kB). View file
|
|
|
vlmpy310/lib/python3.10/site-packages/skimage/segmentation/__pycache__/slic_superpixels.cpython-310.pyc
ADDED
|
Binary file (12.3 kB). View file
|
|
|
vlmpy310/lib/python3.10/site-packages/skimage/segmentation/_clear_border.py
ADDED
|
@@ -0,0 +1,109 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
|
| 3 |
+
from ..measure import label
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
def clear_border(labels, buffer_size=0, bgval=0, mask=None, *, out=None):
    """Clear objects connected to the label image border.

    Parameters
    ----------
    labels : (M[, N[, ..., P]]) array of int or bool
        Imaging data labels.
    buffer_size : int, optional
        The width of the border examined. By default, only objects
        that touch the outside of the image are removed.
    bgval : float or int, optional
        Cleared objects are set to this value.
    mask : ndarray of bool, same shape as `image`, optional.
        Image data mask. Objects in labels image overlapping with
        False pixels of mask will be removed. If defined, the
        argument buffer_size will be ignored.
    out : ndarray
        Array of the same shape as `labels`, into which the
        output is placed. By default, a new array is created.

    Returns
    -------
    out : (M[, N[, ..., P]]) array
        Imaging data labels with cleared borders

    Examples
    --------
    >>> import numpy as np
    >>> from skimage.segmentation import clear_border
    >>> labels = np.array([[0, 0, 0, 0, 0, 0, 0, 1, 0],
    ...                    [1, 1, 0, 0, 1, 0, 0, 1, 0],
    ...                    [1, 1, 0, 1, 0, 1, 0, 0, 0],
    ...                    [0, 0, 0, 1, 1, 1, 1, 0, 0],
    ...                    [0, 1, 1, 1, 1, 1, 1, 1, 0],
    ...                    [0, 0, 0, 0, 0, 0, 0, 0, 0]])
    >>> clear_border(labels)
    array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 1, 0, 0, 0, 0],
           [0, 0, 0, 1, 0, 1, 0, 0, 0],
           [0, 0, 0, 1, 1, 1, 1, 0, 0],
           [0, 1, 1, 1, 1, 1, 1, 1, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0]])
    >>> mask = np.array([[0, 0, 1, 1, 1, 1, 1, 1, 1],
    ...                  [0, 0, 1, 1, 1, 1, 1, 1, 1],
    ...                  [1, 1, 1, 1, 1, 1, 1, 1, 1],
    ...                  [1, 1, 1, 1, 1, 1, 1, 1, 1],
    ...                  [1, 1, 1, 1, 1, 1, 1, 1, 1],
    ...                  [1, 1, 1, 1, 1, 1, 1, 1, 1]]).astype(bool)
    >>> clear_border(labels, mask=mask)
    array([[0, 0, 0, 0, 0, 0, 0, 1, 0],
           [0, 0, 0, 0, 1, 0, 0, 1, 0],
           [0, 0, 0, 1, 0, 1, 0, 0, 0],
           [0, 0, 0, 1, 1, 1, 1, 0, 0],
           [0, 1, 1, 1, 1, 1, 1, 1, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0]])

    """
    if any(buffer_size >= s for s in labels.shape) and mask is None:
        # ignore buffer_size if mask
        raise ValueError("buffer size may not be greater than labels size")

    if out is None:
        out = labels.copy()

    if mask is not None:
        err_msg = (
            f'labels and mask should have the same shape but '
            f'are {out.shape} and {mask.shape}'
        )
        if out.shape != mask.shape:
            # BUG FIX: was `raise (ValueError, err_msg)`, which raises a
            # TypeError in Python 3 (old Python 2 raise syntax, mis-ported).
            raise ValueError(err_msg)
        if mask.dtype != bool:
            raise TypeError("mask should be of type bool.")
        borders = ~mask
    else:
        # create borders with buffer_size: mark a frame of width
        # buffer_size + 1 along every axis as "border".
        borders = np.zeros_like(out, dtype=bool)
        ext = buffer_size + 1
        slstart = slice(ext)
        slend = slice(-ext, None)
        slices = [slice(None) for _ in out.shape]
        for d in range(out.ndim):
            slices[d] = slstart
            borders[tuple(slices)] = True
            slices[d] = slend
            borders[tuple(slices)] = True
            slices[d] = slice(None)

    # Re-label, in case we are dealing with a binary out
    # and to get consistent labeling
    labels, number = label(out, background=0, return_num=True)

    # determine all objects that are connected to borders
    borders_indices = np.unique(labels[borders])
    indices = np.arange(number + 1)
    # mask all label indices that are connected to borders
    label_mask = np.isin(indices, borders_indices)
    # create mask for pixels to clear
    mask = label_mask[labels.reshape(-1)].reshape(labels.shape)

    # clear border pixels
    out[mask] = bgval

    return out
|
vlmpy310/lib/python3.10/site-packages/skimage/segmentation/_felzenszwalb.py
ADDED
|
@@ -0,0 +1,69 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
|
| 3 |
+
from ._felzenszwalb_cy import _felzenszwalb_cython
|
| 4 |
+
from .._shared import utils
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
@utils.channel_as_last_axis(multichannel_output=False)
def felzenszwalb(image, scale=1, sigma=0.8, min_size=20, *, channel_axis=-1):
    """Computes Felsenszwalb's efficient graph based image segmentation.

    Produces an oversegmentation of a multichannel (i.e. RGB) image
    using a fast, minimum spanning tree based clustering on the image grid.
    The parameter ``scale`` sets an observation level. Higher scale means
    less and larger segments. ``sigma`` is the diameter of a Gaussian kernel,
    used for smoothing the image prior to segmentation.

    The number of produced segments as well as their size can only be
    controlled indirectly through ``scale``. Segment size within an image can
    vary greatly depending on local contrast.

    For RGB images, the algorithm uses the euclidean distance between pixels in
    color space.

    Parameters
    ----------
    image : (M, N[, 3]) ndarray
        Input image.
    scale : float
        Free parameter. Higher means larger clusters.
    sigma : float
        Width (standard deviation) of Gaussian kernel used in preprocessing.
    min_size : int
        Minimum component size. Enforced using postprocessing.
    channel_axis : int or None, optional
        If None, the image is assumed to be a grayscale (single channel) image.
        Otherwise, this parameter indicates which axis of the array corresponds
        to channels.

        .. versionadded:: 0.19
           ``channel_axis`` was added in 0.19.

    Returns
    -------
    segment_mask : (M, N) ndarray
        Integer mask indicating segment labels.

    References
    ----------
    .. [1] Efficient graph-based image segmentation, Felzenszwalb, P.F. and
           Huttenlocher, D.P. International Journal of Computer Vision, 2004

    Notes
    -----
    The `k` parameter used in the original paper renamed to `scale` here.

    Examples
    --------
    >>> from skimage.segmentation import felzenszwalb
    >>> from skimage.data import coffee
    >>> img = coffee()
    >>> segments = felzenszwalb(img, scale=3.0, sigma=0.95, min_size=5)
    """
    # The decorator has already moved any channel axis to the last position,
    # so a >2D image with channel_axis=None must be rejected here.
    if channel_axis is None and image.ndim > 2:
        raise ValueError(
            "This algorithm works only on single or " "multi-channel 2d images. "
        )

    # Ensure a trailing (possibly singleton) channel axis for the Cython core.
    image = np.atleast_3d(image)
    return _felzenszwalb_cython(image, scale=scale, sigma=sigma, min_size=min_size)
|
vlmpy310/lib/python3.10/site-packages/skimage/segmentation/morphsnakes.py
ADDED
|
@@ -0,0 +1,449 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from itertools import cycle
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
from scipy import ndimage as ndi
|
| 5 |
+
|
| 6 |
+
from .._shared.utils import check_nD
|
| 7 |
+
|
| 8 |
+
__all__ = [
|
| 9 |
+
'morphological_chan_vese',
|
| 10 |
+
'morphological_geodesic_active_contour',
|
| 11 |
+
'inverse_gaussian_gradient',
|
| 12 |
+
'disk_level_set',
|
| 13 |
+
'checkerboard_level_set',
|
| 14 |
+
]
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class _fcycle:
|
| 18 |
+
def __init__(self, iterable):
|
| 19 |
+
"""Call functions from the iterable each time it is called."""
|
| 20 |
+
self.funcs = cycle(iterable)
|
| 21 |
+
|
| 22 |
+
def __call__(self, *args, **kwargs):
|
| 23 |
+
f = next(self.funcs)
|
| 24 |
+
return f(*args, **kwargs)
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
# SI and IS operators for 2D and 3D.
# _P2: four 3x3 line-shaped structuring elements (main diagonal, vertical,
# anti-diagonal, horizontal) used by sup_inf / inf_sup on 2D arrays.
_P2 = [
    np.eye(3),
    np.array([[0, 1, 0]] * 3),
    np.flipud(np.eye(3)),
    np.rot90([[0, 1, 0]] * 3),
]
# _P3: nine 3x3x3 plane-shaped structuring elements for the 3D case —
# the three axis-aligned mid-planes plus six diagonal planes.
_P3 = [np.zeros((3, 3, 3)) for i in range(9)]

_P3[0][:, :, 1] = 1
_P3[1][:, 1, :] = 1
_P3[2][1, :, :] = 1
_P3[3][:, [0, 1, 2], [0, 1, 2]] = 1
_P3[4][:, [0, 1, 2], [2, 1, 0]] = 1
_P3[5][[0, 1, 2], :, [0, 1, 2]] = 1
_P3[6][[0, 1, 2], :, [2, 1, 0]] = 1
_P3[7][[0, 1, 2], [0, 1, 2], :] = 1
_P3[8][[0, 1, 2], [2, 1, 0], :] = 1
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
def sup_inf(u):
    """SI operator: erode *u* by each line/plane element, then take the
    pointwise maximum of the erosions."""
    ndim = np.ndim(u)
    if ndim == 2:
        elements = _P2
    elif ndim == 3:
        elements = _P3
    else:
        raise ValueError("u has an invalid number of dimensions (should be 2 or 3)")

    eroded = [ndi.binary_erosion(u, se).astype(np.int8) for se in elements]
    return np.stack(eroded, axis=0).max(0)
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
def inf_sup(u):
    """IS operator: dilate *u* by each line/plane element, then take the
    pointwise minimum of the dilations."""
    ndim = np.ndim(u)
    if ndim == 2:
        elements = _P2
    elif ndim == 3:
        elements = _P3
    else:
        raise ValueError("u has an invalid number of dimensions (should be 2 or 3)")

    dilated = [ndi.binary_dilation(u, se).astype(np.int8) for se in elements]
    return np.stack(dilated, axis=0).min(0)
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
# Morphological curvature smoothing: alternates between the two operator
# compositions on successive calls (see _fcycle).
_curvop = _fcycle(
    [lambda u: sup_inf(inf_sup(u)), lambda u: inf_sup(sup_inf(u))]  # SIoIS
)  # ISoSI
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
def _check_input(image, init_level_set):
    """Check that shapes of `image` and `init_level_set` match."""
    check_nD(image, [2, 3])

    if len(image.shape) == len(init_level_set.shape):
        return
    raise ValueError(
        "The dimensions of the initial level set do not "
        "match the dimensions of the image."
    )
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
def _init_level_set(init_level_set, image_shape):
    """Resolve a level-set specification to an array.

    A string selects one of the built-in generators ('checkerboard' or
    'disk') applied to `image_shape`; any non-string value is returned
    unchanged.
    """
    if not isinstance(init_level_set, str):
        return init_level_set

    if init_level_set == 'checkerboard':
        return checkerboard_level_set(image_shape)
    if init_level_set == 'disk':
        return disk_level_set(image_shape)
    raise ValueError("`init_level_set` not in ['checkerboard', 'disk']")
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
def disk_level_set(image_shape, *, center=None, radius=None):
    """Create a disk level set with binary values.

    Parameters
    ----------
    image_shape : tuple of positive integers
        Shape of the image
    center : tuple of positive integers, optional
        Coordinates of the center of the disk given in (row, column). If not
        given, it defaults to the center of the image.
    radius : float, optional
        Radius of the disk. If not given, it is set to the 75% of the
        smallest image dimension.

    Returns
    -------
    out : array with shape `image_shape`
        Binary level set of the disk with the given `radius` and `center`.

    See Also
    --------
    checkerboard_level_set
    """
    if center is None:
        center = tuple(dim // 2 for dim in image_shape)

    if radius is None:
        radius = min(image_shape) * 3.0 / 8.0

    # Squared Euclidean distance from `center`, accumulated axis by axis.
    dist2 = np.zeros(image_shape)
    for coords, c in zip(np.indices(image_shape), center):
        dist2 += (coords - c) ** 2

    # Inside the disk (distance strictly below `radius`) -> 1, else 0.
    return np.int8(np.sqrt(dist2) < radius)
|
| 149 |
+
|
| 150 |
+
|
| 151 |
+
def checkerboard_level_set(image_shape, square_size=5):
    """Create a checkerboard level set with binary values.

    Parameters
    ----------
    image_shape : tuple of positive integers
        Shape of the image.
    square_size : int, optional
        Size of the squares of the checkerboard. It defaults to 5.

    Returns
    -------
    out : array with shape `image_shape`
        Binary level set of the checkerboard.

    See Also
    --------
    disk_level_set
    """
    # Parity (0/1) of each coordinate's square index, one layer per axis.
    parities = (np.indices(image_shape) // square_size) & 1

    # A cell is 1 exactly when an odd number of axis parities are odd,
    # which yields the alternating checkerboard pattern.
    board = np.bitwise_xor.reduce(parities, axis=0)
    return np.int8(board)
|
| 180 |
+
|
| 181 |
+
|
| 182 |
+
def inverse_gaussian_gradient(image, alpha=100.0, sigma=5.0):
    """Inverse of gradient magnitude.

    Compute the magnitude of the gradients in the image and then inverts the
    result in the range [0, 1]. Flat areas are assigned values close to 1,
    while areas close to borders are assigned values close to 0.

    This function or a similar one defined by the user should be applied over
    the image as a preprocessing step before calling
    `morphological_geodesic_active_contour`.

    Parameters
    ----------
    image : (M, N) or (L, M, N) array
        Grayscale image or volume.
    alpha : float, optional
        Controls the steepness of the inversion. A larger value will make the
        transition between the flat areas and border areas steeper in the
        resulting array.
    sigma : float, optional
        Standard deviation of the Gaussian filter applied over the image.

    Returns
    -------
    gimage : (M, N) or (L, M, N) array
        Preprocessed image (or volume) suitable for
        `morphological_geodesic_active_contour`.
    """
    # Gradient magnitude of the Gaussian-smoothed image; 'nearest' mode
    # replicates border values so edges of the array are not flagged.
    magnitude = ndi.gaussian_gradient_magnitude(image, sigma, mode='nearest')
    denom = np.sqrt(1.0 + alpha * magnitude)
    return 1.0 / denom
|
| 212 |
+
|
| 213 |
+
|
| 214 |
+
def morphological_chan_vese(
    image,
    num_iter,
    init_level_set='checkerboard',
    smoothing=1,
    lambda1=1,
    lambda2=1,
    iter_callback=lambda x: None,
):
    """Morphological Active Contours without Edges (MorphACWE)

    Active contours without edges implemented with morphological operators. It
    can be used to segment objects in images and volumes without well defined
    borders. It is required that the inside of the object looks different on
    average than the outside (i.e., the inner area of the object should be
    darker or lighter than the outer area on average).

    Parameters
    ----------
    image : (M, N) or (L, M, N) array
        Grayscale image or volume to be segmented.
    num_iter : uint
        Number of iterations to run.
    init_level_set : str, (M, N) array, or (L, M, N) array
        Initial level set. If an array is given, it will be binarized and used
        as the initial level set. If a string is given, it defines the method
        to generate a reasonable initial level set with the shape of the
        `image`. Accepted values are 'checkerboard' and 'disk'. See the
        documentation of `checkerboard_level_set` and `disk_level_set`
        respectively for details about how these level sets are created.
    smoothing : uint, optional
        Number of times the smoothing operator is applied per iteration.
        Reasonable values are around 1-4. Larger values lead to smoother
        segmentations.
    lambda1 : float, optional
        Weight parameter for the outer region. If `lambda1` is larger than
        `lambda2`, the outer region will contain a larger range of values than
        the inner region.
    lambda2 : float, optional
        Weight parameter for the inner region. If `lambda2` is larger than
        `lambda1`, the inner region will contain a larger range of values than
        the outer region.
    iter_callback : function, optional
        If given, this function is called once per iteration with the current
        level set as the only argument. This is useful for debugging or for
        plotting intermediate results during the evolution.

    Returns
    -------
    out : (M, N) or (L, M, N) array
        Final segmentation (i.e., the final level set)

    See Also
    --------
    disk_level_set, checkerboard_level_set

    Notes
    -----
    This is a version of the Chan-Vese algorithm that uses morphological
    operators instead of solving a partial differential equation (PDE) for the
    evolution of the contour. The set of morphological operators used in this
    algorithm are proved to be infinitesimally equivalent to the Chan-Vese PDE
    (see [1]_). However, morphological operators do not suffer from the
    numerical stability issues typically found in PDEs (it is not necessary to
    find the right time step for the evolution), and are computationally
    faster.

    The algorithm and its theoretical derivation are described in [1]_.

    References
    ----------
    .. [1] A Morphological Approach to Curvature-based Evolution of Curves and
           Surfaces, Pablo Márquez-Neila, Luis Baumela, Luis Álvarez. In IEEE
           Transactions on Pattern Analysis and Machine Intelligence (PAMI),
           2014, :DOI:`10.1109/TPAMI.2013.106`
    """

    init_level_set = _init_level_set(init_level_set, image.shape)

    _check_input(image, init_level_set)

    # Binarize the level set: 1 inside the contour, 0 outside.
    u = np.int8(init_level_set > 0)

    iter_callback(u)

    for _ in range(num_iter):
        # inside = u > 0
        # outside = u <= 0
        # Mean intensity of the outer (c0) and inner (c1) regions; the
        # 1e-8 term guards against division by zero when a region is empty.
        c0 = (image * (1 - u)).sum() / float((1 - u).sum() + 1e-8)
        c1 = (image * u).sum() / float(u.sum() + 1e-8)

        # Image attachment
        # abs_du is nonzero only near the contour, so only pixels on the
        # front are switched by the data term below.
        du = np.gradient(u)
        abs_du = np.abs(du).sum(0)
        aux = abs_du * (lambda1 * (image - c1) ** 2 - lambda2 * (image - c0) ** 2)

        u[aux < 0] = 1
        u[aux > 0] = 0

        # Smoothing
        for _ in range(smoothing):
            u = _curvop(u)

        iter_callback(u)

    return u
|
| 320 |
+
|
| 321 |
+
|
| 322 |
+
def morphological_geodesic_active_contour(
    gimage,
    num_iter,
    init_level_set='disk',
    smoothing=1,
    threshold='auto',
    balloon=0,
    iter_callback=lambda x: None,
):
    """Morphological Geodesic Active Contours (MorphGAC).

    Geodesic active contours implemented with morphological operators. It can
    be used to segment objects with visible but noisy, cluttered, broken
    borders.

    Parameters
    ----------
    gimage : (M, N) or (L, M, N) array
        Preprocessed image or volume to be segmented. This is very rarely the
        original image. Instead, this is usually a preprocessed version of the
        original image that enhances and highlights the borders (or other
        structures) of the object to segment.
        :func:`morphological_geodesic_active_contour` will try to stop the contour
        evolution in areas where `gimage` is small. See
        :func:`inverse_gaussian_gradient` as an example function to
        perform this preprocessing. Note that the quality of
        :func:`morphological_geodesic_active_contour` might greatly depend on this
        preprocessing.
    num_iter : uint
        Number of iterations to run.
    init_level_set : str, (M, N) array, or (L, M, N) array
        Initial level set. If an array is given, it will be binarized and used
        as the initial level set. If a string is given, it defines the method
        to generate a reasonable initial level set with the shape of the
        `image`. Accepted values are 'checkerboard' and 'disk'. See the
        documentation of `checkerboard_level_set` and `disk_level_set`
        respectively for details about how these level sets are created.
    smoothing : uint, optional
        Number of times the smoothing operator is applied per iteration.
        Reasonable values are around 1-4. Larger values lead to smoother
        segmentations.
    threshold : float, optional
        Areas of the image with a value smaller than this threshold will be
        considered borders. The evolution of the contour will stop in these
        areas.
    balloon : float, optional
        Balloon force to guide the contour in non-informative areas of the
        image, i.e., areas where the gradient of the image is too small to push
        the contour towards a border. A negative value will shrink the contour,
        while a positive value will expand the contour in these areas. Setting
        this to zero will disable the balloon force.
    iter_callback : function, optional
        If given, this function is called once per iteration with the current
        level set as the only argument. This is useful for debugging or for
        plotting intermediate results during the evolution.

    Returns
    -------
    out : (M, N) or (L, M, N) array
        Final segmentation (i.e., the final level set)

    See Also
    --------
    inverse_gaussian_gradient, disk_level_set, checkerboard_level_set

    Notes
    -----
    This is a version of the Geodesic Active Contours (GAC) algorithm that uses
    morphological operators instead of solving partial differential equations
    (PDEs) for the evolution of the contour. The set of morphological operators
    used in this algorithm are proved to be infinitesimally equivalent to the
    GAC PDEs (see [1]_). However, morphological operators do not suffer
    from the numerical stability issues typically found in PDEs (e.g., it is
    not necessary to find the right time step for the evolution), and are
    computationally faster.

    The algorithm and its theoretical derivation are described in [1]_.

    References
    ----------
    .. [1] A Morphological Approach to Curvature-based Evolution of Curves and
           Surfaces, Pablo Márquez-Neila, Luis Baumela, Luis Álvarez. In IEEE
           Transactions on Pattern Analysis and Machine Intelligence (PAMI),
           2014, :DOI:`10.1109/TPAMI.2013.106`
    """

    image = gimage
    init_level_set = _init_level_set(init_level_set, image.shape)

    _check_input(image, init_level_set)

    if threshold == 'auto':
        threshold = np.percentile(image, 40)

    # Full (3x...x3) neighborhood used by the balloon dilation/erosion.
    structure = np.ones((3,) * len(image.shape), dtype=np.int8)
    # Gradient of the (preprocessed) image; loop-invariant, computed once.
    dimage = np.gradient(image)
    # threshold_mask = image > threshold
    # The balloon force is only applied where the image response is large
    # enough relative to the force's magnitude.
    if balloon != 0:
        threshold_mask_balloon = image > threshold / np.abs(balloon)

    # Binarize the level set: 1 inside the contour, 0 outside.
    u = np.int8(init_level_set > 0)

    iter_callback(u)

    for _ in range(num_iter):
        # Balloon
        if balloon > 0:
            aux = ndi.binary_dilation(u, structure)
        elif balloon < 0:
            aux = ndi.binary_erosion(u, structure)
        if balloon != 0:
            u[threshold_mask_balloon] = aux[threshold_mask_balloon]

        # Image attachment
        # Dot product of the image gradient and the level-set gradient,
        # accumulated per axis; its sign decides the front's direction.
        aux = np.zeros_like(image)
        du = np.gradient(u)
        for el1, el2 in zip(dimage, du):
            aux += el1 * el2
        u[aux > 0] = 1
        u[aux < 0] = 0

        # Smoothing
        for _ in range(smoothing):
            u = _curvop(u)

        iter_callback(u)

    return u
|