Add files using upload-large-folder tool
Browse files. This view is limited to 50 files because it contains too many changes.
See raw diff
- llava_next/lib/python3.10/site-packages/torch/_inductor/autotune_process.py +273 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/codegen/__init__.py +0 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/common.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/cpp.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/triton.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/triton_utils.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/wrapper.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/codegen/aot_inductor_interface.cpp +171 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/codegen/common.py +1031 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/codegen/cpp.py +0 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/codegen/cpp_prefix.h +372 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/codegen/triton.py +0 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/codegen/triton_foreach.py +234 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/codegen/triton_utils.py +73 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/codegen/wrapper.py +1505 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/coordinate_descent_tuner.py +307 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/cuda_properties.py +58 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/__init__.py +0 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/__init__.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/binary_folding.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/freezing_patterns.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/fuse_attention.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/group_batch_fusion.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/joint_graph.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/mkldnn_fusion.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/pad_mm.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/post_grad.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/pre_grad.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/quantization.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/replace_random.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/split_cat.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/binary_folding.py +274 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/freezing_patterns.py +192 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/fuse_attention.py +568 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/group_batch_fusion.py +575 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/joint_graph.py +256 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/mkldnn_fusion.py +1080 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/pad_mm.py +445 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/post_grad.py +602 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/pre_grad.py +460 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/quantization.py +1020 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/replace_random.py +125 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/split_cat.py +982 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/index_propagation.py +240 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/utils.py +1045 -0
- llava_next/lib/python3.10/site-packages/torch/_inductor/wrapper_benchmark.py +293 -0
- vlmpy310/lib/python3.10/site-packages/skimage/filters/__init__.py +5 -0
- vlmpy310/lib/python3.10/site-packages/skimage/filters/__init__.pyi +109 -0
- vlmpy310/lib/python3.10/site-packages/skimage/filters/_fft_based.py +189 -0
- vlmpy310/lib/python3.10/site-packages/skimage/filters/_gabor.py +220 -0
llava_next/lib/python3.10/site-packages/torch/_inductor/autotune_process.py
ADDED
|
@@ -0,0 +1,273 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import dataclasses
|
| 2 |
+
import queue
|
| 3 |
+
import time
|
| 4 |
+
import warnings
|
| 5 |
+
from multiprocessing.process import BaseProcess
|
| 6 |
+
from multiprocessing.queues import Queue
|
| 7 |
+
from typing import Any, Dict, List, Optional, Tuple, TYPE_CHECKING, Union
|
| 8 |
+
|
| 9 |
+
import torch
|
| 10 |
+
from torch import multiprocessing
|
| 11 |
+
from torch._dynamo.testing import rand_strided
|
| 12 |
+
|
| 13 |
+
from torch._inductor import ir
|
| 14 |
+
from torch._inductor.codecache import PyCodeCache
|
| 15 |
+
|
| 16 |
+
if TYPE_CHECKING:
|
| 17 |
+
from torch._inductor.select_algorithm import TritonTemplateCaller
|
| 18 |
+
|
| 19 |
+
from .utils import do_bench
|
| 20 |
+
from .virtualized import V
|
| 21 |
+
|
| 22 |
+
DEBUG = False
|
| 23 |
+
EXIT_HANDLER_REGISTERED = False
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
# Used to synchronize between parent and child processes
|
| 27 |
+
class Ping:
|
| 28 |
+
pass
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
class Pong:
|
| 32 |
+
pass
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
@dataclasses.dataclass
|
| 36 |
+
class TuningProcess:
|
| 37 |
+
process: Optional[BaseProcess] = None
|
| 38 |
+
request_queue: Optional["Queue[Any]"] = None
|
| 39 |
+
response_queue: Optional["Queue[Any]"] = None
|
| 40 |
+
|
| 41 |
+
@staticmethod
|
| 42 |
+
def process_main(
|
| 43 |
+
request_queue: "Queue[Any]",
|
| 44 |
+
response_queue: "Queue[Any]",
|
| 45 |
+
) -> None:
|
| 46 |
+
print("enter child process main")
|
| 47 |
+
while True:
|
| 48 |
+
obj = request_queue.get()
|
| 49 |
+
|
| 50 |
+
if obj is None:
|
| 51 |
+
break # None is a sentinel for the child to terminate
|
| 52 |
+
elif isinstance(obj, Ping):
|
| 53 |
+
response_queue.put(Pong())
|
| 54 |
+
elif isinstance(obj, BenchmarkRequest):
|
| 55 |
+
response_queue.put(obj.benchmark())
|
| 56 |
+
else:
|
| 57 |
+
raise RuntimeError(f"Invalid request type {type(obj)}")
|
| 58 |
+
|
| 59 |
+
def valid(self) -> bool:
|
| 60 |
+
return (
|
| 61 |
+
self.process is not None
|
| 62 |
+
and self.request_queue is not None
|
| 63 |
+
and self.response_queue is not None
|
| 64 |
+
)
|
| 65 |
+
|
| 66 |
+
def clear(self) -> None:
|
| 67 |
+
self.process = self.request_queue = self.response_queue = None
|
| 68 |
+
|
| 69 |
+
def initialize(self) -> None:
|
| 70 |
+
"""
|
| 71 |
+
Create child process, request/response queues and do the warm up.
|
| 72 |
+
"""
|
| 73 |
+
if self.valid():
|
| 74 |
+
return
|
| 75 |
+
|
| 76 |
+
# cuda runtime does not work with "fork", use "spawn" to start processes.
|
| 77 |
+
ctx = multiprocessing.get_context("spawn")
|
| 78 |
+
request_queue = self.request_queue = ctx.Queue()
|
| 79 |
+
response_queue = self.response_queue = ctx.Queue()
|
| 80 |
+
|
| 81 |
+
process = self.process = ctx.Process(
|
| 82 |
+
target=self.process_main,
|
| 83 |
+
args=(
|
| 84 |
+
self.request_queue,
|
| 85 |
+
self.response_queue,
|
| 86 |
+
),
|
| 87 |
+
)
|
| 88 |
+
process.start()
|
| 89 |
+
|
| 90 |
+
# register the exit handler for the parent process so it will terminate
|
| 91 |
+
# the child processes
|
| 92 |
+
global EXIT_HANDLER_REGISTERED
|
| 93 |
+
if not EXIT_HANDLER_REGISTERED:
|
| 94 |
+
EXIT_HANDLER_REGISTERED = True
|
| 95 |
+
import atexit
|
| 96 |
+
|
| 97 |
+
atexit.register(lambda: self.terminate())
|
| 98 |
+
|
| 99 |
+
# wait for the initialization to be done
|
| 100 |
+
request_queue.put(Ping())
|
| 101 |
+
resp = response_queue.get()
|
| 102 |
+
assert isinstance(resp, Pong)
|
| 103 |
+
|
| 104 |
+
def terminate(self) -> None:
|
| 105 |
+
if self.valid():
|
| 106 |
+
request_queue = self.request_queue
|
| 107 |
+
assert request_queue is not None
|
| 108 |
+
request_queue.put(None)
|
| 109 |
+
process = self.process
|
| 110 |
+
assert process is not None
|
| 111 |
+
process.join()
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
tuning_process = TuningProcess()
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
LayoutOrBuffer = Union[ir.Layout, ir.Buffer]
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
@dataclasses.dataclass
|
| 121 |
+
class TensorMeta:
|
| 122 |
+
device: torch.device
|
| 123 |
+
dtype: torch.dtype
|
| 124 |
+
sizes: List[int]
|
| 125 |
+
strides: List[int]
|
| 126 |
+
offset: int
|
| 127 |
+
|
| 128 |
+
@classmethod
|
| 129 |
+
def from_irnodes(
|
| 130 |
+
cls, irnodes: Union[LayoutOrBuffer, Tuple[LayoutOrBuffer], List[LayoutOrBuffer]]
|
| 131 |
+
) -> Union["TensorMeta", List["TensorMeta"]]:
|
| 132 |
+
if isinstance(irnodes, (tuple, list)):
|
| 133 |
+
result: List[Any] = [cls.from_irnodes(x) for x in irnodes]
|
| 134 |
+
assert all(isinstance(x, TensorMeta) for x in result)
|
| 135 |
+
return result
|
| 136 |
+
|
| 137 |
+
node = irnodes
|
| 138 |
+
if isinstance(node, ir.Layout):
|
| 139 |
+
node = ir.Buffer("fake", node)
|
| 140 |
+
|
| 141 |
+
dtype = node.get_dtype()
|
| 142 |
+
assert dtype is not None
|
| 143 |
+
|
| 144 |
+
return TensorMeta(
|
| 145 |
+
device=node.get_device(),
|
| 146 |
+
dtype=dtype,
|
| 147 |
+
sizes=V.graph.sizevars.size_hints(node.get_size()),
|
| 148 |
+
strides=V.graph.sizevars.size_hints(node.get_stride()),
|
| 149 |
+
offset=V.graph.sizevars.size_hint(node.get_layout().offset),
|
| 150 |
+
)
|
| 151 |
+
|
| 152 |
+
def to_tensor(self) -> torch.Tensor:
|
| 153 |
+
return rand_strided(
|
| 154 |
+
self.sizes,
|
| 155 |
+
self.strides,
|
| 156 |
+
device=self.device,
|
| 157 |
+
dtype=self.dtype,
|
| 158 |
+
extra_size=self.offset,
|
| 159 |
+
)
|
| 160 |
+
|
| 161 |
+
|
| 162 |
+
@dataclasses.dataclass
|
| 163 |
+
class BenchmarkRequest:
|
| 164 |
+
"""
|
| 165 |
+
Only handle triton template benchmark for now. The extern kernel benchmark
|
| 166 |
+
can be done inside the same process since they usually don't cause crash.
|
| 167 |
+
"""
|
| 168 |
+
|
| 169 |
+
module_path: str # the path of the module defining the triton kernel
|
| 170 |
+
module_cache_key: str
|
| 171 |
+
kernel_name: str # the kernel name defined in the module
|
| 172 |
+
grid: List[int]
|
| 173 |
+
extra_args: Dict[str, Any]
|
| 174 |
+
num_stages: int
|
| 175 |
+
num_warps: int
|
| 176 |
+
|
| 177 |
+
input_tensors: Union["TensorMeta", List["TensorMeta"]]
|
| 178 |
+
output_tensor: Union["TensorMeta", List["TensorMeta"]]
|
| 179 |
+
|
| 180 |
+
def benchmark(
|
| 181 |
+
self, *input_tensors: torch.Tensor, output_tensor: Optional[torch.Tensor] = None
|
| 182 |
+
) -> float:
|
| 183 |
+
if DEBUG:
|
| 184 |
+
start_ts = time.time()
|
| 185 |
+
|
| 186 |
+
mod = PyCodeCache.load_by_key_path(self.module_cache_key, self.module_path)
|
| 187 |
+
if DEBUG:
|
| 188 |
+
print(
|
| 189 |
+
f"benchmark module key: {self.module_cache_key}, path: {self.module_path}"
|
| 190 |
+
)
|
| 191 |
+
|
| 192 |
+
run = getattr(mod, self.kernel_name).run
|
| 193 |
+
|
| 194 |
+
if DEBUG:
|
| 195 |
+
load_elapse = time.time() - start_ts
|
| 196 |
+
start_ts = time.time()
|
| 197 |
+
|
| 198 |
+
# create args and out tensor
|
| 199 |
+
if output_tensor is None:
|
| 200 |
+
assert len(input_tensors) == 0
|
| 201 |
+
if isinstance(self.input_tensors, List):
|
| 202 |
+
input_tensors = tuple(x.to_tensor() for x in self.input_tensors)
|
| 203 |
+
if isinstance(self.input_tensors, TensorMeta):
|
| 204 |
+
input_tensors = tuple(self.input_tensors.to_tensor())
|
| 205 |
+
assert isinstance(self.output_tensor, TensorMeta)
|
| 206 |
+
output_tensor = self.output_tensor.to_tensor()
|
| 207 |
+
|
| 208 |
+
if DEBUG:
|
| 209 |
+
create_tensor_elapse = time.time() - start_ts
|
| 210 |
+
start_ts = time.time()
|
| 211 |
+
|
| 212 |
+
def worker() -> float:
|
| 213 |
+
return run(
|
| 214 |
+
*input_tensors,
|
| 215 |
+
output_tensor,
|
| 216 |
+
*self.extra_args,
|
| 217 |
+
grid=self.grid,
|
| 218 |
+
num_stages=self.num_stages,
|
| 219 |
+
num_warps=self.num_warps,
|
| 220 |
+
)
|
| 221 |
+
|
| 222 |
+
out = do_bench(worker)
|
| 223 |
+
torch.cuda.synchronize() # shake out any CUDA errors
|
| 224 |
+
|
| 225 |
+
if DEBUG:
|
| 226 |
+
bench_elapse = time.time() - start_ts
|
| 227 |
+
print(
|
| 228 |
+
f"InChidProcess {self.module_cache_key}: load {load_elapse}, "
|
| 229 |
+
+ f"create tensor {create_tensor_elapse}, bench {bench_elapse}"
|
| 230 |
+
)
|
| 231 |
+
return out
|
| 232 |
+
|
| 233 |
+
|
| 234 |
+
def benchmark_in_sub_process(
|
| 235 |
+
choice: "TritonTemplateCaller",
|
| 236 |
+
) -> float:
|
| 237 |
+
"""
|
| 238 |
+
Do benchmarking in subprocess and return the perf number (latency).
|
| 239 |
+
"""
|
| 240 |
+
assert choice.bmreq is not None
|
| 241 |
+
tuning_process.initialize()
|
| 242 |
+
assert tuning_process.valid()
|
| 243 |
+
process, request_queue, response_queue = (
|
| 244 |
+
tuning_process.process,
|
| 245 |
+
tuning_process.request_queue,
|
| 246 |
+
tuning_process.response_queue,
|
| 247 |
+
)
|
| 248 |
+
assert (
|
| 249 |
+
process is not None and request_queue is not None and response_queue is not None
|
| 250 |
+
)
|
| 251 |
+
|
| 252 |
+
request_queue.put(choice.bmreq)
|
| 253 |
+
while True:
|
| 254 |
+
try:
|
| 255 |
+
timing = response_queue.get(timeout=1.0)
|
| 256 |
+
except queue.Empty:
|
| 257 |
+
status = process.exitcode
|
| 258 |
+
if status is None:
|
| 259 |
+
# child process is still running
|
| 260 |
+
continue
|
| 261 |
+
# child process fail
|
| 262 |
+
assert status != 0
|
| 263 |
+
|
| 264 |
+
warnings.warn(
|
| 265 |
+
f"Fail to benchmark choice '{choice}'. It will be ignored. Please debug the root cause in case the choice can bring perf gains." # noqa: B950 line too long
|
| 266 |
+
)
|
| 267 |
+
|
| 268 |
+
tuning_process.clear()
|
| 269 |
+
|
| 270 |
+
# return INF so this choice will be ignored
|
| 271 |
+
return float("inf")
|
| 272 |
+
|
| 273 |
+
return timing
|
llava_next/lib/python3.10/site-packages/torch/_inductor/codegen/__init__.py
ADDED
|
File without changes
|
llava_next/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/common.cpython-310.pyc
ADDED
|
Binary file (31.8 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/cpp.cpython-310.pyc
ADDED
|
Binary file (92.7 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/triton.cpython-310.pyc
ADDED
|
Binary file (79.9 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/triton_utils.cpython-310.pyc
ADDED
|
Binary file (2.67 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/_inductor/codegen/__pycache__/wrapper.cpython-310.pyc
ADDED
|
Binary file (48.6 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/_inductor/codegen/aot_inductor_interface.cpp
ADDED
|
@@ -0,0 +1,171 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#include <torch/csrc/inductor/aot_inductor_interface.h>
|
| 2 |
+
#include <torch/csrc/inductor/aot_inductor_model_container.h>
|
| 3 |
+
#include <ATen/core/dispatch/Dispatcher.h>
|
| 4 |
+
#include <iostream>
|
| 5 |
+
#include <stdexcept>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
#define CONVERT_EXCEPTION_TO_ERROR_CODE(...) \
|
| 9 |
+
try { \
|
| 10 |
+
__VA_ARGS__ \
|
| 11 |
+
} catch (const std::exception& e) { \
|
| 12 |
+
std::cerr << "Error: " << e.what() << std::endl; \
|
| 13 |
+
return AOTInductorError::Failure; \
|
| 14 |
+
} catch (...) { \
|
| 15 |
+
std::cerr << "Unknown exception occurred." << std::endl; \
|
| 16 |
+
return AOTInductorError::Failure; \
|
| 17 |
+
} \
|
| 18 |
+
return AOTInductorError::Success;
|
| 19 |
+
|
| 20 |
+
extern "C" {
|
| 21 |
+
|
| 22 |
+
AOTInductorError AOTInductorModelContainerCreate(
|
| 23 |
+
AOTInductorModelContainerHandle* container_handle,
|
| 24 |
+
size_t num_models) {
|
| 25 |
+
if (num_models == 0) {
|
| 26 |
+
LOG(ERROR) << "num_models must be positive, but got 0";
|
| 27 |
+
return AOTInductorError::Failure;
|
| 28 |
+
}
|
| 29 |
+
CONVERT_EXCEPTION_TO_ERROR_CODE({
|
| 30 |
+
auto* container =
|
| 31 |
+
new torch::aot_inductor::AOTInductorModelContainer(num_models);
|
| 32 |
+
*container_handle =
|
| 33 |
+
reinterpret_cast<AOTInductorModelContainerHandle>(container);
|
| 34 |
+
})
|
| 35 |
+
}
|
| 36 |
+
|
| 37 |
+
AOTInductorError AOTInductorModelContainerDelete(
|
| 38 |
+
AOTInductorModelContainerHandle container_handle) {
|
| 39 |
+
CONVERT_EXCEPTION_TO_ERROR_CODE({
|
| 40 |
+
auto* container =
|
| 41 |
+
reinterpret_cast<torch::aot_inductor::AOTInductorModelContainer*>(
|
| 42 |
+
container_handle);
|
| 43 |
+
delete container;
|
| 44 |
+
});
|
| 45 |
+
}
|
| 46 |
+
|
| 47 |
+
AOTInductorError AOTInductorModelContainerRun(
|
| 48 |
+
AOTInductorModelContainerHandle container_handle,
|
| 49 |
+
const AOTInductorTensorHandle inputs_handle,
|
| 50 |
+
size_t num_inputs,
|
| 51 |
+
AOTInductorTensorHandle outputs_handle,
|
| 52 |
+
size_t num_outputs,
|
| 53 |
+
AOTInductorStreamHandle stream_handle) {
|
| 54 |
+
auto* container =
|
| 55 |
+
reinterpret_cast<torch::aot_inductor::AOTInductorModelContainer*>(
|
| 56 |
+
container_handle);
|
| 57 |
+
|
| 58 |
+
const auto* inputs = reinterpret_cast<const at::Tensor*>(inputs_handle);
|
| 59 |
+
std::vector<at::Tensor> input_tensors;
|
| 60 |
+
input_tensors.reserve(num_inputs);
|
| 61 |
+
for (size_t i = 0; i < num_inputs; i++) {
|
| 62 |
+
input_tensors.push_back(inputs[i]);
|
| 63 |
+
}
|
| 64 |
+
|
| 65 |
+
auto* outputs = reinterpret_cast<at::Tensor*>(outputs_handle);
|
| 66 |
+
std::vector<at::Tensor> output_tensors;
|
| 67 |
+
output_tensors.reserve(num_outputs);
|
| 68 |
+
for (size_t i = 0; i < num_outputs; i++) {
|
| 69 |
+
output_tensors.push_back(outputs[i]);
|
| 70 |
+
}
|
| 71 |
+
|
| 72 |
+
auto stream = reinterpret_cast<cudaStream_t>(stream_handle);
|
| 73 |
+
CONVERT_EXCEPTION_TO_ERROR_CODE(
|
| 74 |
+
{ container->run(input_tensors, output_tensors, stream); })
|
| 75 |
+
}
|
| 76 |
+
|
| 77 |
+
AOTInductorError AOTInductorModelContainerGetNumInputs(
|
| 78 |
+
AOTInductorModelContainerHandle container_handle,
|
| 79 |
+
size_t* num_inputs_out) {
|
| 80 |
+
auto* container =
|
| 81 |
+
reinterpret_cast<torch::aot_inductor::AOTInductorModelContainer*>(
|
| 82 |
+
container_handle);
|
| 83 |
+
CONVERT_EXCEPTION_TO_ERROR_CODE(
|
| 84 |
+
{ *num_inputs_out = container->num_inputs(); })
|
| 85 |
+
}
|
| 86 |
+
|
| 87 |
+
AOTInductorError AOTInductorModelContainerGetInputName(
|
| 88 |
+
AOTInductorModelContainerHandle container_handle,
|
| 89 |
+
size_t input_idx,
|
| 90 |
+
const char** input_name_out) {
|
| 91 |
+
auto* container =
|
| 92 |
+
reinterpret_cast<torch::aot_inductor::AOTInductorModelContainer*>(
|
| 93 |
+
container_handle);
|
| 94 |
+
CONVERT_EXCEPTION_TO_ERROR_CODE(
|
| 95 |
+
{ *input_name_out = container->input_name(input_idx); })
|
| 96 |
+
}
|
| 97 |
+
|
| 98 |
+
AOTInductorError AOTInductorModelContainerGetInputDtype(
|
| 99 |
+
AOTInductorModelContainerHandle container_handle,
|
| 100 |
+
size_t input_idx,
|
| 101 |
+
const char** input_dtype_out) {
|
| 102 |
+
auto* container =
|
| 103 |
+
reinterpret_cast<torch::aot_inductor::AOTInductorModelContainer*>(
|
| 104 |
+
container_handle);
|
| 105 |
+
CONVERT_EXCEPTION_TO_ERROR_CODE(
|
| 106 |
+
{ *input_dtype_out = container->get_input_dtype(input_idx); })
|
| 107 |
+
}
|
| 108 |
+
|
| 109 |
+
AOTInductorError AOTInductorModelContainerGetNumOutputs(
|
| 110 |
+
AOTInductorModelContainerHandle container_handle,
|
| 111 |
+
size_t* num_outputs_out) {
|
| 112 |
+
auto* container =
|
| 113 |
+
reinterpret_cast<torch::aot_inductor::AOTInductorModelContainer*>(
|
| 114 |
+
container_handle);
|
| 115 |
+
CONVERT_EXCEPTION_TO_ERROR_CODE(
|
| 116 |
+
{ *num_outputs_out = container->num_outputs(); })
|
| 117 |
+
}
|
| 118 |
+
|
| 119 |
+
AOTInductorError AOTInductorModelContainerGetOutputName(
|
| 120 |
+
AOTInductorModelContainerHandle container_handle,
|
| 121 |
+
size_t output_idx,
|
| 122 |
+
const char** output_name_out) {
|
| 123 |
+
auto* container =
|
| 124 |
+
reinterpret_cast<torch::aot_inductor::AOTInductorModelContainer*>(
|
| 125 |
+
container_handle);
|
| 126 |
+
CONVERT_EXCEPTION_TO_ERROR_CODE(
|
| 127 |
+
{ *output_name_out = container->output_name(output_idx); })
|
| 128 |
+
}
|
| 129 |
+
|
| 130 |
+
AOTInductorError AOTInductorModelContainerGetOutputDtype(
|
| 131 |
+
AOTInductorModelContainerHandle container_handle,
|
| 132 |
+
size_t output_idx,
|
| 133 |
+
const char** output_dtype_out) {
|
| 134 |
+
auto* container =
|
| 135 |
+
reinterpret_cast<torch::aot_inductor::AOTInductorModelContainer*>(
|
| 136 |
+
container_handle);
|
| 137 |
+
CONVERT_EXCEPTION_TO_ERROR_CODE(
|
| 138 |
+
{ *output_dtype_out = container->get_output_dtype(output_idx); })
|
| 139 |
+
}
|
| 140 |
+
|
| 141 |
+
AOTInductorError AOTInductorModelContainerGetMaxInputShape(
|
| 142 |
+
AOTInductorModelContainerHandle container_handle,
|
| 143 |
+
size_t input_idx,
|
| 144 |
+
AOTInductorParamShape* input_shape) {
|
| 145 |
+
auto* container =
|
| 146 |
+
reinterpret_cast<torch::aot_inductor::AOTInductorModelContainer*>(
|
| 147 |
+
container_handle);
|
| 148 |
+
CONVERT_EXCEPTION_TO_ERROR_CODE({
|
| 149 |
+
const std::vector<int64_t>& max_input_shape =
|
| 150 |
+
container->max_input_shape(input_idx);
|
| 151 |
+
*input_shape =
|
| 152 |
+
AOTInductorParamShape(max_input_shape.data(), max_input_shape.size());
|
| 153 |
+
})
|
| 154 |
+
}
|
| 155 |
+
|
| 156 |
+
AOTInductorError AOTInductorModelContainerGetMaxOutputShape(
|
| 157 |
+
AOTInductorModelContainerHandle container_handle,
|
| 158 |
+
size_t output_idx,
|
| 159 |
+
AOTInductorParamShape* output_shape) {
|
| 160 |
+
auto* container =
|
| 161 |
+
reinterpret_cast<torch::aot_inductor::AOTInductorModelContainer*>(
|
| 162 |
+
container_handle);
|
| 163 |
+
CONVERT_EXCEPTION_TO_ERROR_CODE({
|
| 164 |
+
const std::vector<int64_t>& max_output_shape =
|
| 165 |
+
container->max_output_shape(output_idx);
|
| 166 |
+
*output_shape =
|
| 167 |
+
AOTInductorParamShape(max_output_shape.data(), max_output_shape.size());
|
| 168 |
+
})
|
| 169 |
+
}
|
| 170 |
+
|
| 171 |
+
} // extern "C"
|
llava_next/lib/python3.10/site-packages/torch/_inductor/codegen/common.py
ADDED
|
@@ -0,0 +1,1031 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import contextlib
|
| 2 |
+
import dataclasses
|
| 3 |
+
import functools
|
| 4 |
+
import itertools
|
| 5 |
+
import logging
|
| 6 |
+
import operator
|
| 7 |
+
import re
|
| 8 |
+
from collections import namedtuple
|
| 9 |
+
from itertools import chain
|
| 10 |
+
from typing import Any, Callable, ClassVar, Dict, List, NamedTuple, Optional, Set, Union
|
| 11 |
+
|
| 12 |
+
import sympy
|
| 13 |
+
from sympy.printing.printer import Printer
|
| 14 |
+
|
| 15 |
+
import torch
|
| 16 |
+
import torch.fx
|
| 17 |
+
from torch.utils._sympy.value_ranges import ValueRanges
|
| 18 |
+
|
| 19 |
+
from .. import metrics
|
| 20 |
+
from ..utils import (
|
| 21 |
+
DeferredLineBase,
|
| 22 |
+
free_symbol_startswith,
|
| 23 |
+
get_sympy_Expr_dtype,
|
| 24 |
+
IndentedBuffer,
|
| 25 |
+
sympy_dot,
|
| 26 |
+
sympy_subs,
|
| 27 |
+
unique,
|
| 28 |
+
)
|
| 29 |
+
from ..virtualized import ops, OpsValue, V
|
| 30 |
+
|
| 31 |
+
schedule_log = torch._logging.getArtifactLogger(__name__, "schedule")
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def data_type_logger(msg):
|
| 35 |
+
if schedule_log.isEnabledFor(logging.DEBUG):
|
| 36 |
+
schedule_log.debug("Data type propagation: %s", msg)
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
# Argument descriptors used when precompiling a kernel:
# TensorArg: kernel-internal name, outer buffer name, and the buffer's dtype.
TensorArg = namedtuple("TensorArg", "name buffer dtype")
# SizeArg: kernel-internal name and the sympy expression for the size value.
SizeArg = namedtuple("SizeArg", "name expr")
|
| 41 |
+
|
| 42 |
+
# Per-backend codegen entry points: the kernel Scheduling class and the
# wrapper-codegen class for one device type.
DeviceCodegen = namedtuple("DeviceCodegen", ["scheduling", "wrapper_codegen"])
# Registry mapping a device type string (e.g. "cuda") to its DeviceCodegen.
device_codegens: Dict[str, DeviceCodegen] = {}


# Inductor emits two kinds of code: kernel code and wrapper code.  A new
# backend customizes both:
#
#   * Kernel code generation is driven by a Scheduling class (in-tree:
#     CppScheduling for C++/OpenMP, TritonScheduling for Triton).
#   * Wrapper code (the Python glue that calls the kernels) is produced by a
#     WrapperCodeGen subclass; out-of-tree backends override its methods for
#     backend-specific wrapper output.
#
# Helper classes such as CppKernel / TritonKernel hang off one of these two
# interfaces, so a backend may implement them from scratch or extend the
# in-tree versions.  register_backend_for_device is the runtime registration
# API that plugs a new backend in.
#
# Intel's Triton-based backend for Intel GPUs uses these interfaces and can
# serve as a reference:
# https://github.com/intel/intel-extension-for-pytorch/blob/5dcc9d57e5422cf295e1a1ee97896d6b6a554a85/intel_extension_for_pytorch/_inductor/__init__.py#L9
def register_backend_for_device(
    device: str, device_scheduling: type, device_wrapper_codegen: type
):
    """Register the Scheduling and WrapperCodeGen classes for *device*."""
    device_codegens[device] = DeviceCodegen(device_scheduling, device_wrapper_codegen)


def get_scheduling_for_device(device: str):
    """Return the registered Scheduling class for *device*, or None."""
    entry = device_codegens.get(device)
    return entry.scheduling if entry is not None else None


def get_wrapper_codegen_for_device(device: str):
    """Return the registered wrapper-codegen class for *device*, or None."""
    entry = device_codegens.get(device)
    return entry.wrapper_codegen if entry is not None else None
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
def index_prevent_reordering(index: List[sympy.Expr], index_vars, sizes):
    """Return *index* extended with a contiguous index expression.

    The appended dot product of the iteration variables with contiguous
    strides pins the current dimension order so later passes cannot
    reorder it.
    """
    from ..ir import FlexibleLayout

    contiguous = sympy_dot(index_vars, FlexibleLayout.contiguous_strides(sizes))
    return list(index) + [contiguous]
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
@functools.lru_cache(None)
def boolean_ops():
    """Names of pointwise ops whose result dtype is always torch.bool."""
    return (
        "is_inf",
        "is_nan",
        "bitwise_xor",
        "logical_not",
        "signbit",
        "le",
        "lt",
        "ge",
        "gt",
        "eq",
        "ne",
    )
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
# Maps a tensor dtype to the dtype intermediates are computed in: reduced
# precision floats are upcast to float32, everything else computes in itself.
DTYPE_TO_COMPUTATION_DTYPE = {
    torch.bfloat16: torch.float,
    torch.float16: torch.float,
    torch.bool: torch.bool,
    torch.float32: torch.float32,
    torch.float64: torch.float64,
    torch.int8: torch.int8,
    torch.int16: torch.int16,
    torch.int32: torch.int32,
    torch.int64: torch.int64,
    torch.uint8: torch.uint8,
}
|
| 124 |
+
|
| 125 |
+
|
| 126 |
+
class DataTypePropagation:
    """Propagate torch dtypes through the fx graphs of a loop body.

    For every fx node visited, the deduced dtype is recorded on
    ``node.meta[OptimizationContext.key].dtype`` so later codegen passes can
    consult it.  Sub-blocks (``masked_subblock*``) are propagated recursively,
    using their output node's dtype as the subgraph's dtype.
    """

    def __init__(self, body) -> None:
        # NOTE(review): `body` is presumably a LoopBody (see
        # propagate_scheduler_node, which asserts that) — it must expose
        # `root_block.graph` and `subblocks`.
        self.body = body
        # All graphs to propagate, keyed by "root" plus each subblock's key.
        self.graphs: Dict[Union[Callable[..., Any], str], Any] = {
            "root": body.root_block.graph
        }
        for k, v in body.subblocks.items():
            self.graphs[k] = v.graph

    def deduce_node_dtype_by_inputs(self, node: torch.fx.Node):
        """Deduce `node`'s dtype by promoting its non-placeholder inputs.

        Returns None when the node has no usable inputs or when any input
        has not been annotated with a dtype yet.
        """
        inputs = node.all_input_nodes
        input_nodes = [
            n for n in inputs if isinstance(n, torch.fx.Node) and n.op != "placeholder"
        ]
        if len(input_nodes) == 0:
            return None

        all_input_nodes_propogated = all(
            OptimizationContext.key in n.meta
            and n.meta[OptimizationContext.key].dtype is not None
            for n in input_nodes
        )
        if not all_input_nodes_propogated:
            return None

        # Standard type promotion across every input dtype.
        return functools.reduce(
            torch.promote_types,
            [n.meta[OptimizationContext.key].dtype for n in input_nodes],
        )

    def deduce_node_dtype_by_subgraph(self, node: torch.fx.Node):
        """Deduce a subblock node's dtype by propagating through its graph."""
        sub_graph = self.graphs[node.target]
        dtype = self.propagate_graph(sub_graph)
        assert dtype
        return dtype

    def deduce_node_dtype(self, node: torch.fx.Node):
        """Return the dtype for `node`, or None if it cannot be determined."""
        if node.target in boolean_ops():
            return torch.bool

        if node.op == "placeholder":
            return None

        if node.target == "output":
            # we can infer output node if it only have 1 arg
            if len(node.args) != 1:
                return None
            # A single-arg output falls through and is resolved by the
            # generic input-based deduction at the bottom of this method.

        if node.target in (
            "to_dtype",
            "index_expr",
        ):
            # The last argument is treated as the explicit dtype.
            return node.args[-1]

        if node.target in (
            "rand",
            "randn",
        ):
            return torch.float

        if node.target in (
            "get_index",
            "index_expr",
        ):
            return torch.int64

        if node.target in (
            "load",
            "store",
            "store_reduction",
        ):
            # args[1] is the buffer name; ask the graph for its dtype.
            buf_name = node.args[1]
            return V.graph.get_dtype(buf_name)

        if node.target == operator.getitem:
            return self.deduce_node_dtype(node.args[0])

        assert isinstance(node.target, str)

        if node.target == "reduction":
            return node.args[1]

        if node.target == "constant":
            # Constants are mapped to their computation dtype
            # (fp16/bf16 -> fp32; see DTYPE_TO_COMPUTATION_DTYPE).
            return DTYPE_TO_COMPUTATION_DTYPE[node.args[-1]]

        if node.target.startswith("masked_subblock"):
            return self.deduce_node_dtype_by_subgraph(node)

        return self.deduce_node_dtype_by_inputs(node)

    def propagate_graph(self, graph: torch.fx.Graph):
        """Annotate every node in `graph`; return the output node's dtype."""
        assert graph.nodes
        graph_dtype = None
        # For masked_subblock, we use output's dtype to represent
        # the dtype of this subgraph. For other cases, graph_dtype
        # might be None
        for node in graph.nodes:
            if OptimizationContext.key in node.meta:
                opt_ctx = node.meta[OptimizationContext.key]
            else:
                opt_ctx = OptimizationContext()

            opt_ctx.dtype = self.deduce_node_dtype(node)
            node.meta[OptimizationContext.key] = opt_ctx
            if node.target == "output":
                graph_dtype = opt_ctx.dtype
        return graph_dtype

    def propagate(self):
        """Propagate dtypes starting from the root graph."""
        self.propagate_graph(self.graphs["root"])

    @classmethod
    def propagate_loopbody(cls, body):
        """Convenience entry point: propagate over a whole loop body."""
        return cls(body).propagate()

    @classmethod
    def propagate_scheduler_node(cls, node):
        """Propagate dtypes for a SchedulerNode's LoopBody."""
        from ..ir import LoopBody
        from ..scheduler import SchedulerNode

        assert isinstance(node, SchedulerNode)
        assert isinstance(node._body, LoopBody)
        DataTypePropagation.propagate_loopbody(node._body)
|
| 249 |
+
|
| 250 |
+
|
| 251 |
+
class ExprPrinter(Printer):
    """Base sympy expression printer shared by backend printers.

    Provides common infix operator handlers and the parenthesization helper;
    subclasses supply language-specific pieces such as ``_print_FloorDiv``
    and ``_helper_sqrt``.
    """

    @staticmethod
    def paren(string):
        """Wrap `string` in parens unless it is atomic or already fully
        parenthesized (also passes CSEVariable instances through untouched)."""

        def all_in_parens(string):
            # True iff the string is one balanced group: the '(' at position 0
            # closes only at the very last character.
            if string[0] != "(" or len(string) < 2:
                return False
            count = 1
            for i, char in enumerate(string[1:]):
                if char == "(":
                    count += 1
                elif char == ")":
                    count -= 1
                    if count == 0 and i != len(string) - 2:
                        return False
            assert count == 0
            return True

        if (
            isinstance(string, CSEVariable)
            or re.match(r"^[a-z0-9_.]+$", string, re.I)
            or re.match(r"^\([^)]*\)$", string, re.I)
            or string == ""
        ):
            return string
        # don't put extra parens for strings that are already wrapped in parens
        if all_in_parens(string):
            return string
        return f"({string})"

    def _print_Pow(self, expr):
        """Print Pow as repeated multiplication (or sqrt for +-0.5 exponents)."""
        # Pow() confuses triton
        base, exp = expr.args
        # NB: Remember this is sizevar computation! You don't typically
        # expect to have to do floating point computation including exponents
        # in sizevar compute. Instead of adding support for floating
        # point pow, you should make upstream retranslate the Sympy expression
        # into Tensor expressions earlier and do that instead.
        if exp == 0.5:
            return self._helper_sqrt(base)  # type: ignore[attr-defined]
        elif exp == -0.5:
            return "1/" + self._helper_sqrt(base)  # type: ignore[attr-defined]
        base = self._print(base)
        # Only integral exponents are supported beyond the sqrt cases.
        assert exp == int(exp), exp
        exp = int(exp)
        if exp > 0:
            return "*".join([self.paren(base)] * exp)
        elif exp < 0:
            return "1/" + self.paren("*".join([self.paren(base)] * abs(exp)))
        else:  # exp == 0
            return "1"

    def _print_Unequality(self, expr):
        # sympy spells != as "Unequality".
        return " != ".join(map(self.paren, map(self._print, expr.args)))

    def _print_Mul(self, expr):
        return "*".join(map(self.paren, map(self._print, expr.args)))

    def _print_Add(self, expr):
        return " + ".join(map(self.paren, map(self._print, expr.args)))

    def _print_Mod(self, expr):
        return " % ".join(map(self.paren, map(self._print, expr.args)))

    def _print_CleanDiv(self, expr):
        # CleanDiv (exact division) prints the same as floor division.
        return self._print_FloorDiv(expr)  # type: ignore[attr-defined]

    def _print_GreaterThan(self, expr):
        # GreaterThan:          >=
        # StrictlyGreaterThan:  >
        # Go figure...
        return " >= ".join(map(self.paren, map(self._print, expr.args)))
|
| 322 |
+
|
| 323 |
+
|
| 324 |
+
class PythonPrinter(ExprPrinter):
    """Render sympy expressions as plain Python source (using ``math``)."""

    def _print_ModularIndexing(self, expr):
        # ModularIndexing(x, div, mod) -> (x // div) % mod, eliding `// 1`.
        base, divisor, modulus = [self.paren(self.doprint(a)) for a in expr.args]
        if divisor != "1":
            base = f"({base} // {divisor})"
        return f"{base} % {modulus}"

    def _print_FloorDiv(self, expr):
        numerator, denominator = [self.paren(self.doprint(a)) for a in expr.args]
        return f"({numerator} // {denominator})"

    def _helper_sqrt(self, expr):
        return f"math.sqrt({self._print(expr)})"

    def _print_floor(self, expr):
        assert len(expr.args) == 1
        return f"math.floor({self._print(expr.args[0])})"

    def _print_ceiling(self, expr):
        assert len(expr.args) == 1
        return f"math.ceil({self._print(expr.args[0])})"
|
| 350 |
+
|
| 351 |
+
|
| 352 |
+
class OpOverrides:
    """Overrides a handful of ops in terms of other ops / string templates,
    delegating everything else to a wrapped parent handler."""

    def __init__(self, parent):
        super().__init__()
        self._parent = parent

    def __getattr__(self, item):
        # Anything not overridden here falls through to the parent handler.
        return getattr(self._parent, item)

    @staticmethod
    def identity(value):
        # used to trigger cse
        return value

    @staticmethod
    def constant(value, dtype):
        return repr(value)

    @staticmethod
    def reciprocal(x):
        return ops.div("1", x)

    @staticmethod
    def square(x):
        return ops.mul(x, x)

    @staticmethod
    def bitwise_not(x):
        return f"~{ExprPrinter.paren(x)}"

    @staticmethod
    def logical_not(a):
        return f"{ExprPrinter.paren(a)} == 0"

    @staticmethod
    def bitwise_and(x, y):
        paren = ExprPrinter.paren
        return f"{paren(x)} & {paren(y)}"

    @staticmethod
    def bitwise_or(x, y):
        paren = ExprPrinter.paren
        return f"{paren(x)} | {paren(y)}"

    @staticmethod
    def bitwise_xor(x, y):
        paren = ExprPrinter.paren
        return f"{paren(x)} ^ {paren(y)}"

    @staticmethod
    def bitwise_left_shift(x, y):
        paren = ExprPrinter.paren
        return f"{paren(x)} << {paren(y)}"

    # TODO(fdrocha): this is currently not being used anywhere,
    # pending on moving triton pin past 972b761
    @staticmethod
    def bitwise_right_shift(x, y):
        paren = ExprPrinter.paren
        return f"{paren(x)} >> {paren(y)}"

    @staticmethod
    def remainder(a, b):
        # Python-style modulo from C-style: adjust when signs disagree.
        r = ops.mod(a, b)
        cond = f"(({r} != 0) & (({r} < 0) != ({b} < 0)))"
        return ops.where(cond, ops.add(r, b), r)

    @staticmethod
    def load_seed(name, offset):
        return ops.load(name, sympy.Integer(offset))
|
| 415 |
+
|
| 416 |
+
|
| 417 |
+
class DeferredLine(DeferredLineBase):
    """A line that can be 'unwritten' by adding name to V.graph.removed_buffers"""

    def __init__(self, name, line):
        super().__init__(line)
        self.name = name

    def __call__(self):
        # Suppress the line once its buffer has been removed after scheduling.
        buffer_dropped = (
            self.name in V.graph.removed_buffers
            or self.name in V.graph.inplaced_to_remove
        )
        return None if buffer_dropped else self.line

    def _new_line(self, line):
        return DeferredLine(self.name, line)
|
| 434 |
+
|
| 435 |
+
|
| 436 |
+
class BracesBuffer(IndentedBuffer):
    """IndentedBuffer for brace-delimited code: indent() also emits the
    matching '{' / '}' lines around the indented region."""

    def indent(self, offset=1):
        def open_braces(n):
            # Emit '{' then increase indentation, n times (no-op for n <= 0).
            for _ in range(n):
                self.writeline("{")
                self._indent += 1

        def close_braces(n):
            # Decrease indentation then emit '}', n times (no-op for n <= 0).
            for _ in range(n):
                self._indent -= 1
                self.writeline("}")

        @contextlib.contextmanager
        def ctx():
            # Positive offset opens braces on entry and closes them on exit;
            # negative offset does the reverse.
            open_braces(offset)
            close_braces(-offset)
            yield
            open_braces(-offset)
            close_braces(offset)

        return ctx()
|
| 455 |
+
|
| 456 |
+
|
| 457 |
+
class InplacedBuffer(NamedTuple):
    """Kernel argument record for a buffer that is read and written in place.

    One instance is shared by every outer buffer name it aliases; see
    KernelArgs.make_inplace.
    """

    # Kernel-internal argument name, e.g. "in_out_ptr0".
    inner_name: str
    # All outer (graph-level) buffer names mapped onto this argument.
    other_names: List[str]
|
| 460 |
+
|
| 461 |
+
|
| 462 |
+
class KernelArgs:
    """Tracks the arguments of a kernel being generated.

    Maps outer (graph-level) buffer and size-variable names to kernel-internal
    argument names (``in_ptr0``, ``out_ptr0``, ``in_out_ptr0``, ``ks0``, ...)
    and can render the resulting argument lists for both the C++ and Python
    calling conventions.
    """

    @staticmethod
    def _lookup(prefix, odict, name):
        """Return (allocating on first use) the inner name for `name`,
        formed as prefix + insertion index."""
        assert isinstance(name, (str, sympy.Symbol))
        if name not in odict:
            odict[name] = f"{prefix}{len(odict)}"
        return odict[name]

    def __init__(self, sizevars=None):
        # outer name -> inner name for read-only inputs
        self.input_buffers = dict()
        # outer name -> inner name for outputs
        self.output_buffers = dict()
        # outer name -> shared InplacedBuffer for in/out aliased buffers
        self.inplace_buffers = dict()
        # size expression -> inner name
        self.sizevars = sizevars or dict()

    def __repr__(self):
        return "KernelArgs({})".format(
            ", ".join(
                map(
                    repr,
                    [
                        self.input_buffers,
                        self.output_buffers,
                        self.inplace_buffers,
                        self.sizevars,
                    ],
                )
            )
        )

    def _buffer_is_marked_removed(self, name):
        # Removed buffers are tombstoned with a "REMOVED..." placeholder string.
        return isinstance(name, str) and name.startswith("REMOVED")

    def input(self, name):
        """Register (or look up) `name` as an input; returns the inner name."""
        if V.graph.scheduler:
            # Resolve mutation aliases to the buffer that actually holds the data.
            name = V.graph.scheduler.mutation_real_name.get(name, name)
        assert name not in V.graph.removed_buffers, name
        if name in self.output_buffers:
            return self.output_buffers[name]
        if name in self.inplace_buffers:
            return self.inplace_buffers[name].inner_name
        if name.startswith("seed"):
            # RNG seed buffers keep a distinguishable prefix.
            return self._lookup("seed", self.input_buffers, name)
        return self._lookup("in_ptr", self.input_buffers, name)

    def output(self, name):
        """Register (or look up) `name` as an output; returns the inner name."""
        if V.graph.scheduler:
            name = V.graph.scheduler.mutation_real_name.get(name, name)
        assert name not in V.graph.removed_buffers, name
        if name in self.inplace_buffers:
            return self.inplace_buffers[name].inner_name
        return self._lookup("out_ptr", self.output_buffers, name)

    def make_inplace(self, input_name, output_name):
        """Alias `output_name` onto `input_name` as one in/out argument."""
        assert output_name not in self.inplace_buffers
        if input_name in self.inplace_buffers:
            # Extend the existing alias group.
            buf = self.inplace_buffers[input_name]
            buf.other_names.append(output_name)
            self.inplace_buffers[output_name] = buf
        else:
            buf = InplacedBuffer(
                f"in_out_ptr{len(unique(self.inplace_buffers.values()))}",
                [input_name, output_name],
            )
            self.inplace_buffers[input_name] = buf
            self.inplace_buffers[output_name] = buf

    def seed_offset(self, name, value):
        """Register a seed-offset size variable, de-duplicating `name` by
        appending a count when it is already taken."""
        if value in self.sizevars:
            return self.sizevars[value]
        if name in self.sizevars.values():
            name = (
                f"{name}{sum(1 for v in self.sizevars.values() if v.startswith(name))}"
            )
        self.sizevars[value] = name
        return name

    def size(self, name):
        """Register (or look up) a size variable; returns the inner name."""
        if str(name) == "seed":
            # "seed" keeps its literal name rather than a ks{N} alias.
            self.sizevars["seed"] = "seed"
            return "seed"
        return self._lookup("ks", self.sizevars, name)

    def call_names(self):
        """Outer names, in argument order, used at the call site."""
        return chain(
            self.input_buffers.keys(), self.output_buffers.keys(), self.sizevars.keys()
        )

    def wrap_ptr_arg(self, buf, dtype):
        """Render a buffer's data pointer for the (ctypes) call site."""
        return f"c_void_p({buf}.data_ptr())"

    def wrap_size_arg(self, size):
        """Render a size value for the (ctypes) call site."""
        return f"c_long({size})"

    def cpp_argdefs(self):
        """Return (arg_defs, call_args, arg_types) for the C++ signature."""
        from .cpp import DTYPE_TO_CPP, INDEX_TYPE

        # TODO(jansel): replace this with data from scheduler
        buffer_types = {x.get_name(): x.get_dtype() for x in V.graph.buffers}
        for name, val in V.graph.graph_inputs.items():
            if isinstance(val, sympy.Expr):
                buffer_types[name] = get_sympy_Expr_dtype(val)
            else:
                buffer_types[name] = val.get_dtype()
        buffer_types.update(
            {name: val.dtype for name, val in V.graph.constants.items()}
        )

        call_args = []
        arg_defs = []
        arg_types = []
        # In/out aliased buffers come first, then inputs, outputs, sizes —
        # the same ordering python_argdefs uses.
        for inplaced in unique(self.inplace_buffers.values()):
            if self._buffer_is_marked_removed(inplaced):
                continue
            outer = inplaced.other_names[-1]
            inner = inplaced.inner_name
            dtype = buffer_types[outer]
            cpp_dtype = DTYPE_TO_CPP[dtype]
            arg_defs.append(f"{cpp_dtype}* {inner}")
            call_args.append(self.wrap_ptr_arg(outer, dtype))
            arg_types.append(f"{cpp_dtype}*")
        for outer, inner in self.input_buffers.items():
            if outer in self.inplace_buffers:
                continue
            dtype = buffer_types[outer]
            cpp_dtype = DTYPE_TO_CPP[dtype]
            arg_defs.append(f"const {cpp_dtype}* {inner}")
            call_args.append(self.wrap_ptr_arg(outer, dtype))
            arg_types.append(f"const {cpp_dtype}*")
        for outer, inner in self.output_buffers.items():
            if outer in self.inplace_buffers or self._buffer_is_marked_removed(inner):
                continue
            dtype = buffer_types[outer]
            cpp_dtype = DTYPE_TO_CPP[dtype]
            arg_defs.append(f"{cpp_dtype}* {inner}")
            call_args.append(self.wrap_ptr_arg(outer, dtype))
            arg_types.append(f"{cpp_dtype}*")
        for outer, inner in self.sizevars.items():
            arg_defs.append(f"const {INDEX_TYPE} {inner}")
            call_args.append(self.wrap_size_arg(outer))
            arg_types.append(f"const {INDEX_TYPE}")
        return arg_defs, call_args, arg_types

    def python_argdefs(self):
        """Return (arg_defs, call_args, precompile_args) for the Python wrapper."""
        arg_defs = []
        call_args = []
        precompile_args: List[Union[TensorArg, SizeArg]] = []
        for inplaced in unique(self.inplace_buffers.values()):
            if self._buffer_is_marked_removed(inplaced):
                continue
            arg_defs.append(inplaced.inner_name)
            call_args.append(inplaced.other_names[-1])
            precompile_args.append(
                TensorArg(
                    inplaced.inner_name,
                    inplaced.other_names[-1],
                    V.graph.get_dtype(inplaced.other_names[-1]),
                )
            )
        for outer, inner in chain(
            self.input_buffers.items(), self.output_buffers.items()
        ):
            if outer in self.inplace_buffers or self._buffer_is_marked_removed(inner):
                continue
            arg_defs.append(inner)
            call_args.append(outer)
            precompile_args.append(TensorArg(inner, outer, V.graph.get_dtype(outer)))
        for outer, inner in self.sizevars.items():
            arg_defs.append(inner)
            call_args.append(outer)
            precompile_args.append(SizeArg(inner, outer))

        return arg_defs, call_args, precompile_args

    def aliases(self):
        """Yield (alias inner name, canonical in/out inner name) pairs."""
        for inplaced in unique(self.inplace_buffers.values()):
            if self._buffer_is_marked_removed(inplaced):
                continue
            for other in inplaced.other_names:
                if other in V.graph.inplaced_to_remove:
                    continue
                if other in self.input_buffers:
                    yield self.input_buffers[other], inplaced.inner_name
                if other in self.output_buffers:
                    yield self.output_buffers[other], inplaced.inner_name

    def is_removed(self, name):
        """True if `name` is not a live output/in-place buffer of this kernel."""
        def _is_removed(name, buffers):
            return name not in buffers or self._buffer_is_marked_removed(buffers[name])

        return _is_removed(name, self.output_buffers) and _is_removed(
            name, self.inplace_buffers
        )

    # Includes inplace buffers, excludes removed buffers. Essentially,
    # after you do a call into this kernel, which buffers actually contain
    # updated data? Modeled off of python_argdefs.
    def live_output_buffers(self):
        live_outs = set()
        for inplaced in unique(self.inplace_buffers.values()):
            if self._buffer_is_marked_removed(inplaced):
                continue
            live_outs.add(inplaced.other_names[-1])
        for outer, inner in self.output_buffers.items():
            if outer in self.inplace_buffers or self._buffer_is_marked_removed(inner):
                continue
            live_outs.add(outer)
        return live_outs
|
| 669 |
+
|
| 670 |
+
|
| 671 |
+
class CSEVariable:
    """A name for a CSE'd expression, carrying value-range bounds.

    Backends can subclass this (created via ``Kernel.create_cse_var``) to
    attach backend-specific annotations; ``update_on_args`` is the hook
    invoked as ops are applied.  See TritonCSEVariable in triton.py.
    """

    def __init__(self, name, bounds: ValueRanges):
        assert isinstance(bounds, ValueRanges)
        self.name = name
        self.bounds = bounds

    def __str__(self):
        return self.name

    def __hash__(self) -> int:
        return hash(self.name)

    def __eq__(self, other) -> bool:
        # Equal iff exact same class and same name; bounds are ignored.
        return type(self) is type(other) and self.name == other.name

    def update_on_args(self, name, args, kwargs):
        # Default: no annotation; backends override as needed.
        pass
|
| 694 |
+
|
| 695 |
+
|
| 696 |
+
class CppWrapperKernelArgs(KernelArgs):
    """KernelArgs variant for the C++ wrapper: arguments are rendered as raw
    C++ expressions rather than ctypes wrappers."""

    def wrap_ptr_arg(self, buf, dtype):
        from .cpp import DTYPE_TO_CPP

        # Cast the tensor's data pointer to the concrete C++ element type.
        return f"({DTYPE_TO_CPP[dtype]}*)({buf}.data_ptr())"

    def wrap_size_arg(self, size):
        # Sizes are emitted verbatim; no c_long() wrapper is needed in C++.
        return f"{size}"
|
| 704 |
+
|
| 705 |
+
|
| 706 |
+
class CSE:
    """Common subexpression elimination"""

    def __init__(
        self,
        prefix="",
        suffix="",
        name_prefix="tmp",
        iter_buffers=None,
        store_cache=None,
        reduction_cache=None,
        varname_map=None,
    ):
        # Text placed before/after each generated assignment line.
        self.prefix = prefix
        self.suffix = suffix
        # expression text -> CSEVariable assigned to it.
        self.cache = {}
        self.name_prefix = name_prefix
        # buffer name -> variable holding the value last stored to it.
        self.store_cache = store_cache or {}
        self.reduction_cache = reduction_cache or {}
        # Shared counter so clones never reuse a variable name.
        self.iter_buffer_ids = iter_buffers or itertools.count()
        self.invalidated_stores = set()
        # variable name -> CSEVariable instance.
        self.varname_map = varname_map or {}

    def invalidate(self, keep_vars: Set[str]):
        """Drop cached expressions/stores whose variables are not in `keep_vars`
        (e.g. when leaving the scope those variables were defined in)."""
        for name, tmp in list(self.store_cache.items()):
            if tmp not in keep_vars:
                del self.store_cache[name]
                self.invalidated_stores.add(name)
        self.cache = {k: v for k, v in self.cache.items() if v in keep_vars}

    def clone(self):
        """Copy this CSE for a sub-scope, sharing the name counter and caches."""
        # Note(fdrocha): reduction_cache is not being cloned, not sure if this is intentional
        return CSE(
            prefix=self.prefix,
            suffix=self.suffix,
            name_prefix=self.name_prefix,
            iter_buffers=self.iter_buffer_ids,
            store_cache=self.store_cache,
            varname_map=self.varname_map,
        )

    def generate(
        self,
        buffer: IndentedBuffer,
        expr: Union[str, CSEVariable, OpsValue],
        *,
        bounds: ValueRanges = ValueRanges.unknown(),
        write=True,
        assignment=True,
    ) -> CSEVariable:
        """Return the CSEVariable for `expr`, emitting an assignment into
        `buffer` only the first time this expression text is seen.

        With assignment=False the expression is written as a bare statement;
        with write=False nothing is emitted, only the cache is updated.
        """
        if isinstance(expr, OpsValue):
            expr = expr.value

        assert isinstance(expr, (str, CSEVariable)), type(expr)
        assert write or assignment
        if isinstance(expr, CSEVariable):
            # If the expressions were always created with all the information, we could
            # assert expr.bounds == bounds, but sometimes the expression is created
            # with the loose ValueRanges.unknown(), so we need to tighten the bounds
            expr.bounds = expr.bounds.tighten(bounds)
            return expr
        cache_key = expr
        var = self.cache.get(cache_key, None)
        if not var:
            var = self.newvar(bounds) if assignment else None
            self.cache[cache_key] = var
            if write:
                if V.kernel.current_node:
                    # Attach provenance comments for the originating fx node.
                    V.kernel.current_node.codegen_originating_info(
                        buffer, only_once=True
                    )
                if assignment:
                    line = f"{self.prefix}{var} = {expr}{self.suffix}"
                else:
                    line = f"{expr}{self.suffix}"
                buffer.writeline(line)
        else:
            # Cache hit: just tighten the known bounds of the existing var.
            var.bounds = var.bounds.tighten(bounds)

        return var

    def newvar(self, bounds: ValueRanges = ValueRanges.unknown()) -> CSEVariable:
        """Allocate a fresh backend-specific CSEVariable with `bounds`."""
        var_name = f"{self.name_prefix}{next(self.iter_buffer_ids)}"
        var = V.kernel.create_cse_var(var_name, bounds)
        self.varname_map[var_name] = var
        return var
|
| 792 |
+
|
| 793 |
+
|
| 794 |
+
class CodeGen:
    """Base class giving code generators a with-statement protocol backed by
    a contextlib.ExitStack (subclasses push cleanup callbacks onto it)."""

    def __init__(self):
        super().__init__()
        self.exit_stack = contextlib.ExitStack()

    def __enter__(self):
        self.exit_stack.__enter__()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Unwind everything registered on the stack; exceptions propagate.
        self.exit_stack.__exit__(exc_type, exc_val, exc_tb)
|
| 805 |
+
|
| 806 |
+
|
| 807 |
+
class Kernel(CodeGen):
|
| 808 |
+
newvar_prefix = ""
|
| 809 |
+
suffix = ""
|
| 810 |
+
overrides = None
|
| 811 |
+
load_format = None
|
| 812 |
+
store_format = None
|
| 813 |
+
|
| 814 |
+
def __init__(self, args=None):
|
| 815 |
+
super().__init__()
|
| 816 |
+
metrics.generated_kernel_count += 1
|
| 817 |
+
self.args = args or KernelArgs()
|
| 818 |
+
self.loads = IndentedBuffer()
|
| 819 |
+
self.compute = IndentedBuffer()
|
| 820 |
+
self.stores = IndentedBuffer()
|
| 821 |
+
self.cse = CSE(self.newvar_prefix, self.suffix)
|
| 822 |
+
self.must_keep_buffers = set()
|
| 823 |
+
self.store_buffer_names = set()
|
| 824 |
+
# set in set_current_node
|
| 825 |
+
self.current_node = None
|
| 826 |
+
self.node_to_bounds: Optional[Dict[torch.fx.Node, ValueRanges]] = None
|
| 827 |
+
|
| 828 |
+
@contextlib.contextmanager
|
| 829 |
+
def set_current_node(self, node):
|
| 830 |
+
prior = self.current_node
|
| 831 |
+
self.current_node = node
|
| 832 |
+
self.node_to_bounds = node._body.bounds().get_bounds()
|
| 833 |
+
try:
|
| 834 |
+
yield
|
| 835 |
+
finally:
|
| 836 |
+
self.current_node = prior
|
| 837 |
+
|
| 838 |
+
@contextlib.contextmanager
|
| 839 |
+
def swap_buffers(self, lb, cb=None, sb=None):
|
| 840 |
+
if cb is None:
|
| 841 |
+
cb = lb
|
| 842 |
+
loads = self.loads
|
| 843 |
+
compute = self.compute
|
| 844 |
+
stores = self.stores
|
| 845 |
+
cse = self.cse
|
| 846 |
+
self.loads = lb
|
| 847 |
+
self.compute = cb
|
| 848 |
+
self.stores = sb
|
| 849 |
+
self.cse = cse.clone()
|
| 850 |
+
try:
|
| 851 |
+
yield
|
| 852 |
+
finally:
|
| 853 |
+
self.loads = loads
|
| 854 |
+
self.compute = compute
|
| 855 |
+
self.stores = stores
|
| 856 |
+
self.cse = cse
|
| 857 |
+
|
| 858 |
+
def load(self, name: str, index: sympy.Expr):
|
| 859 |
+
raise NotImplementedError()
|
| 860 |
+
|
| 861 |
+
def indirect_load(self, name: str, index: sympy.Expr):
|
| 862 |
+
"""A load the depends on an index we have read"""
|
| 863 |
+
prior = self.loads
|
| 864 |
+
try:
|
| 865 |
+
# put the load in the compute section as it might have deps
|
| 866 |
+
self.loads = self.compute
|
| 867 |
+
return self.load(name, index)
|
| 868 |
+
finally:
|
| 869 |
+
self.loads = prior
|
| 870 |
+
|
| 871 |
+
def store_reduction(self, name, index, value):
|
| 872 |
+
raise NotImplementedError()
|
| 873 |
+
|
| 874 |
+
def store(self, name, index, value, mode=None):
|
| 875 |
+
raise NotImplementedError()
|
| 876 |
+
|
| 877 |
+
def reduction(self, dtype, src_dtype, reduction_type, value):
|
| 878 |
+
raise NotImplementedError()
|
| 879 |
+
|
| 880 |
+
def bucketize(
|
| 881 |
+
self,
|
| 882 |
+
values,
|
| 883 |
+
offsets_name: str,
|
| 884 |
+
offsets_size: sympy.Expr,
|
| 885 |
+
indexing_dtype: torch.dtype,
|
| 886 |
+
right: bool,
|
| 887 |
+
):
|
| 888 |
+
"""
|
| 889 |
+
See [Note: Inductor bucketize op]
|
| 890 |
+
"""
|
| 891 |
+
raise NotImplementedError()
|
| 892 |
+
|
| 893 |
+
def __enter__(self):
|
| 894 |
+
class CSEProxy:
|
| 895 |
+
self.name = "CSEProxy"
|
| 896 |
+
|
| 897 |
+
@staticmethod
|
| 898 |
+
def __getattr__(name: str) -> Callable[..., CSEVariable]: # type: ignore[misc]
|
| 899 |
+
def inner(*args, **kwargs):
|
| 900 |
+
# TritonTemplateKernel has no current_node
|
| 901 |
+
buf_bounds = ValueRanges.unknown()
|
| 902 |
+
if hasattr(V.interpreter, "current_node"):
|
| 903 |
+
fx_node = V.interpreter.current_node
|
| 904 |
+
assert isinstance(self.node_to_bounds, dict)
|
| 905 |
+
buf_bounds = self.node_to_bounds.get(
|
| 906 |
+
fx_node, ValueRanges.unknown()
|
| 907 |
+
)
|
| 908 |
+
|
| 909 |
+
csevar = self.cse.generate(
|
| 910 |
+
self.compute,
|
| 911 |
+
getattr(parent_handler, name)(*args, **kwargs), # type: ignore[has-type]
|
| 912 |
+
bounds=buf_bounds,
|
| 913 |
+
)
|
| 914 |
+
csevar.update_on_args(name, args, kwargs)
|
| 915 |
+
return csevar
|
| 916 |
+
|
| 917 |
+
return inner
|
| 918 |
+
|
| 919 |
+
@staticmethod
|
| 920 |
+
def indirect_indexing(index_var, size, check=True):
|
| 921 |
+
# Skip CSE since this doesn't return an expression
|
| 922 |
+
return self.indirect_indexing(index_var, size, check) # type: ignore[attr-defined]
|
| 923 |
+
|
| 924 |
+
@staticmethod
|
| 925 |
+
def load(name: str, index: sympy.Expr):
|
| 926 |
+
if name in self.cse.invalidated_stores:
|
| 927 |
+
# A load from an invalidated store requires us to
|
| 928 |
+
# keep the actual buffer around
|
| 929 |
+
V.kernel.must_keep_buffers.add(name)
|
| 930 |
+
if free_symbol_startswith(index, "tmp"):
|
| 931 |
+
return self.indirect_load(name, index)
|
| 932 |
+
store_cache = self.cse.store_cache
|
| 933 |
+
if name in store_cache:
|
| 934 |
+
return store_cache[name]
|
| 935 |
+
return self.load(name, index)
|
| 936 |
+
|
| 937 |
+
@staticmethod
|
| 938 |
+
def store(name, index, value, mode=None):
|
| 939 |
+
self.store_buffer_names.add(name)
|
| 940 |
+
if mode is None:
|
| 941 |
+
self.cse.store_cache[name] = value
|
| 942 |
+
if self.current_node:
|
| 943 |
+
for other_name in self.current_node.get_mutations():
|
| 944 |
+
self.cse.store_cache[other_name] = value
|
| 945 |
+
if name not in V.graph.removed_buffers:
|
| 946 |
+
return self.store(name, index, value, mode=mode)
|
| 947 |
+
|
| 948 |
+
@staticmethod
|
| 949 |
+
def store_reduction(name, index, value):
|
| 950 |
+
self.store_buffer_names.add(name)
|
| 951 |
+
self.cse.store_cache[name] = value
|
| 952 |
+
if self.current_node:
|
| 953 |
+
for other_name in self.current_node.get_mutations():
|
| 954 |
+
self.cse.store_cache[other_name] = value
|
| 955 |
+
|
| 956 |
+
if name not in V.graph.removed_buffers:
|
| 957 |
+
return self.store_reduction(name, index, value)
|
| 958 |
+
|
| 959 |
+
@staticmethod
|
| 960 |
+
def reduction(dtype, src_dtype, reduction_type, value):
|
| 961 |
+
return self.reduction(dtype, src_dtype, reduction_type, value)
|
| 962 |
+
|
| 963 |
+
@staticmethod
|
| 964 |
+
def bucketize(
|
| 965 |
+
values,
|
| 966 |
+
offsets_name: str,
|
| 967 |
+
offsets_size: sympy.Expr,
|
| 968 |
+
indexing_dtype: torch.dtype,
|
| 969 |
+
right: bool,
|
| 970 |
+
):
|
| 971 |
+
"""
|
| 972 |
+
[Note: Inductor bucketize op]
|
| 973 |
+
|
| 974 |
+
Given values (tensor) and offsets_name (reference to the name of a 1D
|
| 975 |
+
tensor), calculate the bucket that each value belongs to.
|
| 976 |
+
|
| 977 |
+
e.g. for values [-1, 0, 1, 2, 3, 4, 5, 9], offsets [0, 4, 4, 8], right=True
|
| 978 |
+
return = [ 0, 1, 1, 1, 1, 3, 3, 4].
|
| 979 |
+
|
| 980 |
+
When right == False, bucket i refers to range (offsets[i], offsets[i+1]].
|
| 981 |
+
When right == True, bucket i refers to range [offsets[i], offsets[i+1]).
|
| 982 |
+
|
| 983 |
+
Offsets must be non-decreasing or the result is undefined.
|
| 984 |
+
"""
|
| 985 |
+
return self.bucketize(
|
| 986 |
+
values, offsets_name, offsets_size, indexing_dtype, right
|
| 987 |
+
)
|
| 988 |
+
|
| 989 |
+
super().__enter__()
|
| 990 |
+
assert self.overrides
|
| 991 |
+
parent_handler = self.overrides(V.get_ops_handler())
|
| 992 |
+
self.exit_stack.enter_context(V.set_ops_handler(CSEProxy()))
|
| 993 |
+
self.exit_stack.enter_context(V.set_kernel_handler(self))
|
| 994 |
+
return self
|
| 995 |
+
|
| 996 |
+
def __exit__(self, exc_type, exc_val, exc_tb):
|
| 997 |
+
if V.graph.scheduler:
|
| 998 |
+
V.graph.scheduler.remove_kernel_local_buffers()
|
| 999 |
+
super().__exit__(exc_type, exc_val, exc_tb)
|
| 1000 |
+
|
| 1001 |
+
def rename_indexing(self, index) -> sympy.Expr:
|
| 1002 |
+
# adds the necessary kernel args for index expressions
|
| 1003 |
+
# and renames variables in index expressions to kernel arg names
|
| 1004 |
+
if isinstance(index, (list, tuple)):
|
| 1005 |
+
return [self.rename_indexing(x) for x in index]
|
| 1006 |
+
index = V.graph.sizevars.simplify(index)
|
| 1007 |
+
sorted_symbols = sorted(index.free_symbols, key=lambda s: s.name)
|
| 1008 |
+
replacements = {
|
| 1009 |
+
x: self.args.size(x)
|
| 1010 |
+
for x in sorted_symbols
|
| 1011 |
+
if x.name.startswith("s") or x.name.startswith("ps")
|
| 1012 |
+
}
|
| 1013 |
+
return sympy_subs(index, replacements)
|
| 1014 |
+
|
| 1015 |
+
def create_cse_var(self, *args, **kwargs):
|
| 1016 |
+
return CSEVariable(*args, **kwargs)
|
| 1017 |
+
|
| 1018 |
+
|
| 1019 |
+
@dataclasses.dataclass
|
| 1020 |
+
class OptimizationContext:
|
| 1021 |
+
key: ClassVar[str] = "opt_ctx"
|
| 1022 |
+
|
| 1023 |
+
# Load value as mask
|
| 1024 |
+
is_load_as_mask: bool = False
|
| 1025 |
+
|
| 1026 |
+
dtype: torch.dtype = None
|
| 1027 |
+
ops_name: str = ""
|
| 1028 |
+
is_most_inner_loop_irrevelant: bool = False
|
| 1029 |
+
|
| 1030 |
+
# Load uint8 value as float32
|
| 1031 |
+
is_load_uint8_as_float: bool = False
|
llava_next/lib/python3.10/site-packages/torch/_inductor/codegen/cpp.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
llava_next/lib/python3.10/site-packages/torch/_inductor/codegen/cpp_prefix.h
ADDED
|
@@ -0,0 +1,372 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <algorithm>
|
| 4 |
+
#include <atomic>
|
| 5 |
+
#include <cmath>
|
| 6 |
+
#include <cstdlib>
|
| 7 |
+
#include <limits>
|
| 8 |
+
#include <omp.h>
|
| 9 |
+
|
| 10 |
+
#include <ATen/NumericUtils.h>
|
| 11 |
+
#include <ATen/core/PhiloxRNGEngine.h>
|
| 12 |
+
#include <ATen/native/BinaryOps.h>
|
| 13 |
+
#include <ATen/native/Math.h>
|
| 14 |
+
|
| 15 |
+
#include <c10/util/BFloat16.h>
|
| 16 |
+
#include <c10/util/BFloat16-math.h>
|
| 17 |
+
#include <c10/util/Half.h>
|
| 18 |
+
|
| 19 |
+
#if defined(CPU_CAPABILITY_AVX512) || defined(CPU_CAPABILITY_AVX2)
|
| 20 |
+
#define INDUCTOR_USE_VECTOR_TYPES() 1
|
| 21 |
+
#else
|
| 22 |
+
#define INDUCTOR_USE_VECTOR_TYPES() 0
|
| 23 |
+
#endif
|
| 24 |
+
|
| 25 |
+
#if INDUCTOR_USE_VECTOR_TYPES()
|
| 26 |
+
#include <ATen/cpu/vec/functional.h>
|
| 27 |
+
#include <ATen/cpu/vec/vec.h>
|
| 28 |
+
#endif
|
| 29 |
+
|
| 30 |
+
typedef at::Half half;
|
| 31 |
+
typedef at::BFloat16 bfloat16;
|
| 32 |
+
|
| 33 |
+
template <typename T>
|
| 34 |
+
struct Welford {
|
| 35 |
+
T mean = T(0);
|
| 36 |
+
T m2 = T(0);
|
| 37 |
+
T weight = T(0);
|
| 38 |
+
};
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
template <typename T>
|
| 42 |
+
struct IsVecType: std::false_type {};
|
| 43 |
+
|
| 44 |
+
#if INDUCTOR_USE_VECTOR_TYPES()
|
| 45 |
+
template <typename T>
|
| 46 |
+
struct IsVecType<at::vec::Vectorized<T>>: std::true_type {};
|
| 47 |
+
#endif
|
| 48 |
+
|
| 49 |
+
template <typename T>
|
| 50 |
+
Welford<T> welford_combine(const Welford<T> &a, const Welford<T> &b) {
|
| 51 |
+
if constexpr (!IsVecType<T>::value) {
|
| 52 |
+
if (a.weight == 0) {
|
| 53 |
+
return b;
|
| 54 |
+
}
|
| 55 |
+
if (b.weight == 0) {
|
| 56 |
+
return a;
|
| 57 |
+
}
|
| 58 |
+
}
|
| 59 |
+
auto delta = b.mean - a.mean;
|
| 60 |
+
auto new_weight = a.weight + b.weight;
|
| 61 |
+
auto wb_over_w = b.weight / new_weight;
|
| 62 |
+
if constexpr (IsVecType<T>::value) {
|
| 63 |
+
// Guard against division by zero
|
| 64 |
+
wb_over_w = T::blendv(wb_over_w, T(0), new_weight == T(0));
|
| 65 |
+
}
|
| 66 |
+
auto result = Welford<T>{
|
| 67 |
+
a.mean + delta * wb_over_w,
|
| 68 |
+
a.m2 + b.m2 + delta * delta * a.weight * wb_over_w,
|
| 69 |
+
new_weight
|
| 70 |
+
};
|
| 71 |
+
return result;
|
| 72 |
+
}
|
| 73 |
+
|
| 74 |
+
template <typename T>
|
| 75 |
+
Welford<T> welford_combine(const Welford<T> &acc, T data) {
|
| 76 |
+
// Add a single data point
|
| 77 |
+
auto delta = data - acc.mean;
|
| 78 |
+
auto new_weight = acc.weight + T(1);
|
| 79 |
+
auto new_mean = acc.mean + delta / new_weight;
|
| 80 |
+
auto new_delta = data - new_mean;
|
| 81 |
+
auto result = Welford<T>{
|
| 82 |
+
new_mean,
|
| 83 |
+
acc.m2 + delta * new_delta,
|
| 84 |
+
new_weight
|
| 85 |
+
};
|
| 86 |
+
return result;
|
| 87 |
+
}
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
#if INDUCTOR_USE_VECTOR_TYPES()
|
| 91 |
+
template <typename scalar_t>
|
| 92 |
+
inline at::vec::Vectorized<scalar_t> vec_shuffle_down(at::vec::Vectorized<scalar_t> x, size_t n) {
|
| 93 |
+
using Vec = at::vec::Vectorized<scalar_t>;
|
| 94 |
+
alignas(alignof(Vec)) scalar_t array[Vec::size()];
|
| 95 |
+
x.store(array);
|
| 96 |
+
for (size_t i = 0; i + n < Vec::size(); i += 2 * n) {
|
| 97 |
+
array[i] = array[i + n];
|
| 98 |
+
}
|
| 99 |
+
return Vec::loadu(array);
|
| 100 |
+
}
|
| 101 |
+
|
| 102 |
+
#ifdef CPU_CAPABILITY_AVX2
|
| 103 |
+
inline at::vec::Vectorized<float> vec_shuffle_down(at::vec::Vectorized<float> x, size_t n) {
|
| 104 |
+
using vec_t = at::vec::Vectorized<float>;
|
| 105 |
+
#define SHUFFLE_MASK(z, y, x, w) ((z << 6) | (y << 4) | (x << 2) | w)
|
| 106 |
+
switch (n) {
|
| 107 |
+
case 1:
|
| 108 |
+
return vec_t(_mm256_permute_ps(x, SHUFFLE_MASK(1, 1, 3, 3)));
|
| 109 |
+
case 2:
|
| 110 |
+
return vec_t(_mm256_permute_ps(x, SHUFFLE_MASK(2, 2, 2, 2)));
|
| 111 |
+
case 4:
|
| 112 |
+
return vec_t(_mm256_permute2f128_ps(x, x, SHUFFLE_MASK(1, 1, 1, 1)));
|
| 113 |
+
}
|
| 114 |
+
TORCH_CHECK(false, "Unhandled vec_shuffle_down value ", n);
|
| 115 |
+
}
|
| 116 |
+
#endif
|
| 117 |
+
|
| 118 |
+
template <typename scalar_t>
|
| 119 |
+
Welford<scalar_t> welford_vec_reduce_all(Welford<at::vec::Vectorized<scalar_t>> acc) {
|
| 120 |
+
using Vec = at::vec::Vectorized<scalar_t>;
|
| 121 |
+
for (size_t n = 1; n < Vec::size(); n *= 2) {
|
| 122 |
+
auto shuffled = Welford<Vec>{
|
| 123 |
+
vec_shuffle_down(acc.mean, n),
|
| 124 |
+
vec_shuffle_down(acc.m2, n),
|
| 125 |
+
vec_shuffle_down(acc.weight, n)
|
| 126 |
+
};
|
| 127 |
+
acc = welford_combine(acc, shuffled);
|
| 128 |
+
}
|
| 129 |
+
|
| 130 |
+
Welford<scalar_t> result;
|
| 131 |
+
alignas(alignof(Vec)) scalar_t array[Vec::size()];
|
| 132 |
+
acc.mean.store(array);
|
| 133 |
+
result.mean = array[0];
|
| 134 |
+
|
| 135 |
+
acc.m2.store(array);
|
| 136 |
+
result.m2 = array[0];
|
| 137 |
+
|
| 138 |
+
acc.weight.store(array);
|
| 139 |
+
result.weight = array[0];
|
| 140 |
+
|
| 141 |
+
return result;
|
| 142 |
+
}
|
| 143 |
+
#endif
|
| 144 |
+
|
| 145 |
+
|
| 146 |
+
template <typename T> inline T mod(T a, T b) { return a % b; }
|
| 147 |
+
template <> inline float mod(float a, float b) { return std::fmod(a, b); }
|
| 148 |
+
template <> inline double mod(double a, double b) { return std::fmod(a, b); }
|
| 149 |
+
|
| 150 |
+
template <typename scalar_t>
|
| 151 |
+
inline scalar_t max_propagate_nan(scalar_t a, scalar_t b) {
|
| 152 |
+
if (at::_isnan(a)) {
|
| 153 |
+
return a;
|
| 154 |
+
}
|
| 155 |
+
return a > b ? a : b;
|
| 156 |
+
}
|
| 157 |
+
|
| 158 |
+
template <typename scalar_t>
|
| 159 |
+
inline scalar_t min_propagate_nan(scalar_t a, scalar_t b) {
|
| 160 |
+
if (at::_isnan(a)) {
|
| 161 |
+
return a;
|
| 162 |
+
}
|
| 163 |
+
return a < b ? a : b;
|
| 164 |
+
}
|
| 165 |
+
|
| 166 |
+
constexpr float uint32_to_uniform_float(uint32_t value) {
|
| 167 |
+
// maximum value such that `MAX_INT * scale < 1.0` (with float rounding)
|
| 168 |
+
constexpr float scale = 4.6566127342e-10;
|
| 169 |
+
return static_cast<float>(value & 0x7FFFFFFF) * scale;
|
| 170 |
+
}
|
| 171 |
+
|
| 172 |
+
float normalized_rand_cpu(uint32_t seed, uint32_t offset) {
|
| 173 |
+
return uint32_to_uniform_float(at::Philox4_32(seed, 0, offset)());
|
| 174 |
+
}
|
| 175 |
+
|
| 176 |
+
float randn_cpu(uint32_t seed, uint32_t offset) {
|
| 177 |
+
at::Philox4_32 engine(seed, 0, offset);
|
| 178 |
+
return engine.randn(10);
|
| 179 |
+
}
|
| 180 |
+
|
| 181 |
+
uint64_t randint64_cpu(uint32_t seed, uint32_t offset, int64_t low, int64_t high) {
|
| 182 |
+
auto gen = at::Philox4_32(seed, 0, offset);
|
| 183 |
+
uint64_t r0 = gen();
|
| 184 |
+
uint64_t r1 = gen();
|
| 185 |
+
uint64_t result = r0 | (r1 << 32);
|
| 186 |
+
return (result % static_cast<uint64_t>(high - low)) + low;
|
| 187 |
+
}
|
| 188 |
+
|
| 189 |
+
template <typename T> struct AsIntegerType { typedef T type; };
|
| 190 |
+
template <> struct AsIntegerType<float> { typedef uint32_t type; };
|
| 191 |
+
template <> struct AsIntegerType<double> { typedef uint64_t type; };
|
| 192 |
+
template <> struct AsIntegerType<bfloat16> { typedef uint16_t type; };
|
| 193 |
+
|
| 194 |
+
template <typename T>
|
| 195 |
+
typename std::enable_if<!std::is_reduced_floating_point<T>::value, T>::type
|
| 196 |
+
inline fetch_value(volatile T *addr) {
|
| 197 |
+
return *addr;
|
| 198 |
+
}
|
| 199 |
+
|
| 200 |
+
template <typename T>
|
| 201 |
+
typename std::enable_if<std::is_reduced_floating_point<T>::value, T>::type
|
| 202 |
+
inline fetch_value(volatile T *addr) {
|
| 203 |
+
return T(addr->x, T::from_bits());
|
| 204 |
+
}
|
| 205 |
+
|
| 206 |
+
template <typename T>
|
| 207 |
+
typename std::enable_if<!std::is_integral<T>::value>::type
|
| 208 |
+
atomic_add(volatile T *addr, T offset) {
|
| 209 |
+
typedef typename AsIntegerType<T>::type alt_type;
|
| 210 |
+
|
| 211 |
+
static_assert(sizeof(std::atomic<alt_type>) == sizeof(T),
|
| 212 |
+
"std::atomic issue");
|
| 213 |
+
|
| 214 |
+
alt_type expected;
|
| 215 |
+
|
| 216 |
+
alt_type desired;
|
| 217 |
+
|
| 218 |
+
std::atomic<alt_type> *atomic_addr = (std::atomic<alt_type> *)addr;
|
| 219 |
+
do {
|
| 220 |
+
T val = fetch_value(addr);
|
| 221 |
+
reinterpret_cast<T *>(&expected)[0] = val;
|
| 222 |
+
reinterpret_cast<T *>(&desired)[0] = val + offset;
|
| 223 |
+
} while (!atomic_addr->compare_exchange_weak(expected, desired,
|
| 224 |
+
std::memory_order_relaxed));
|
| 225 |
+
}
|
| 226 |
+
|
| 227 |
+
// Since C++20 float is supported by fetch_add, but the performance may not
|
| 228 |
+
// better than compare_exchange_weak, which can be checked by microbenchmark
|
| 229 |
+
// inductor_cpu_atomic.py
|
| 230 |
+
template <typename T>
|
| 231 |
+
typename std::enable_if<std::is_integral<T>::value>::type
|
| 232 |
+
atomic_add(volatile T *addr, T offset) {
|
| 233 |
+
static_assert(sizeof(std::atomic<T>) == sizeof(T),
|
| 234 |
+
"std::atomic issue");
|
| 235 |
+
std::atomic<T> *atomic_addr = (std::atomic<T> *)addr;
|
| 236 |
+
atomic_addr->fetch_add(offset, std::memory_order_relaxed);
|
| 237 |
+
}
|
| 238 |
+
|
| 239 |
+
// This function is used to convert bool or uint8 to float mask for
|
| 240 |
+
// vectorization. The caller needs to make sure the src represents TRUE/FALSE
|
| 241 |
+
// correctly.
|
| 242 |
+
template <typename T>
|
| 243 |
+
inline float flag_to_float_scalar(T src) {
|
| 244 |
+
float ret;
|
| 245 |
+
*(uint32_t*)(&ret) = src ? 0xFFFFFFFF : 0;
|
| 246 |
+
return ret;
|
| 247 |
+
}
|
| 248 |
+
|
| 249 |
+
#if defined(CPU_CAPABILITY_AVX512) || defined(CPU_CAPABILITY_AVX2)
|
| 250 |
+
|
| 251 |
+
inline at::vec::Vectorized<float> masked_load(const float* src, at::vec::Vectorized<float> mask) {
|
| 252 |
+
at::vec::Vectorized<float> zero_vec(0);
|
| 253 |
+
# if defined(CPU_CAPABILITY_AVX512)
|
| 254 |
+
auto all_ones = _mm512_set1_epi32(0xFFFFFFFF);
|
| 255 |
+
auto mmask = _mm512_cmp_epi32_mask(_mm512_castps_si512(mask), all_ones, _MM_CMPINT_EQ);
|
| 256 |
+
return _mm512_mask_loadu_ps(zero_vec, mmask, src);
|
| 257 |
+
# else // AVX2
|
| 258 |
+
auto all_ones = _mm256_set1_epi32(0xFFFFFFFF);
|
| 259 |
+
auto mmask = _mm256_cmpeq_epi32(_mm256_castps_si256(mask), all_ones);
|
| 260 |
+
return _mm256_maskload_ps(src, mmask);
|
| 261 |
+
# endif
|
| 262 |
+
}
|
| 263 |
+
|
| 264 |
+
template <typename T>
|
| 265 |
+
typename std::enable_if<std::is_same<T, bfloat16>::value || std::is_same<T, half>::value, at::vec::Vectorized<T>>::type
|
| 266 |
+
inline masked_load(const T* src, at::vec::Vectorized<float> mask) {
|
| 267 |
+
# if defined(CPU_CAPABILITY_AVX512)
|
| 268 |
+
auto all_ones = _mm512_set1_epi32(0xFFFFFFFF);
|
| 269 |
+
auto mmask = _mm512_cmp_epi32_mask(_mm512_castps_si512(mask), all_ones, _MM_CMPINT_EQ);
|
| 270 |
+
auto zero = _mm256_set1_epi16(0);
|
| 271 |
+
auto temp = _mm256_mask_loadu_epi16(zero, mmask, src);
|
| 272 |
+
return _mm512_inserti32x8(_mm512_castsi256_si512(temp), zero, 1);
|
| 273 |
+
# else // AVX2
|
| 274 |
+
auto all_ones = _mm256_set1_epi32(0xFFFFFFFF);
|
| 275 |
+
auto mmask_vec = _mm256_cmpeq_epi32(_mm256_castps_si256(mask), all_ones);
|
| 276 |
+
__at_align__ uint32_t mmask[8];
|
| 277 |
+
_mm256_storeu_si256(reinterpret_cast<__m256i*>(mmask), mmask_vec);
|
| 278 |
+
__at_align__ uint16_t result[16];
|
| 279 |
+
for (auto i = 0; i < 8; i++) {
|
| 280 |
+
result[i] = mmask[i] == 0xFFFFFFFF ? src[i].x: uint16_t(0);
|
| 281 |
+
}
|
| 282 |
+
return at::vec::Vectorized<T>::loadu(result);
|
| 283 |
+
# endif
|
| 284 |
+
}
|
| 285 |
+
|
| 286 |
+
inline at::vec::Vectorized<uint8_t> masked_load(const uint8_t* src, at::vec::Vectorized<float> mask) {
|
| 287 |
+
# if defined(CPU_CAPABILITY_AVX512)
|
| 288 |
+
auto all_ones = _mm512_set1_epi32(0xFFFFFFFF);
|
| 289 |
+
auto mmask = _mm512_cmp_epi32_mask(_mm512_castps_si512(mask), all_ones, _MM_CMPINT_EQ);
|
| 290 |
+
auto zero = _mm_set1_epi8(0);
|
| 291 |
+
auto temp = _mm_mask_loadu_epi8(zero, mmask, src);
|
| 292 |
+
return _mm512_inserti64x2(_mm512_set1_epi32(0), temp, 0);
|
| 293 |
+
# else // AVX2
|
| 294 |
+
auto all_ones = _mm256_set1_epi32(0xFFFFFFFF);
|
| 295 |
+
auto mmask_vec = _mm256_cmpeq_epi32(_mm256_castps_si256(mask), all_ones);
|
| 296 |
+
__at_align__ uint32_t mmask[8];
|
| 297 |
+
_mm256_storeu_si256(reinterpret_cast<__m256i*>(mmask), mmask_vec);
|
| 298 |
+
__at_align__ uint8_t result[32];
|
| 299 |
+
for (auto i = 0; i < 8; i++) {
|
| 300 |
+
result[i] = mmask[i] == 0xFFFFFFFF ? src[i]: uint8_t(0);
|
| 301 |
+
}
|
| 302 |
+
return at::vec::Vectorized<uint8_t>::loadu(result);
|
| 303 |
+
# endif
|
| 304 |
+
}
|
| 305 |
+
|
| 306 |
+
template <typename T>
|
| 307 |
+
inline at::vec::Vectorized<float> flag_to_float_vec(const T* src) {
|
| 308 |
+
__at_align__ float dst_tmp[at::vec::Vectorized<float>::size()];
|
| 309 |
+
#pragma unroll
|
| 310 |
+
for (int64_t i = 0; i < at::vec::Vectorized<float>::size(); i++) {
|
| 311 |
+
dst_tmp[i] = flag_to_float_scalar(src[i]);
|
| 312 |
+
}
|
| 313 |
+
return at::vec::Vectorized<float>::loadu(dst_tmp);
|
| 314 |
+
}
|
| 315 |
+
|
| 316 |
+
template <typename scalar_t>
|
| 317 |
+
inline at::vec::Vectorized<float> cvt_lowp_fp_to_fp32(
|
| 318 |
+
at::vec::Vectorized<scalar_t> src) {
|
| 319 |
+
at::vec::Vectorized<float> res_vec1(0);
|
| 320 |
+
at::vec::Vectorized<float> res_vec2(0);
|
| 321 |
+
std::tie(res_vec1, res_vec2) = at::vec::convert_to_float<scalar_t>(src);
|
| 322 |
+
return res_vec1;
|
| 323 |
+
}
|
| 324 |
+
|
| 325 |
+
template <typename scalar_t>
|
| 326 |
+
inline at::vec::Vectorized<scalar_t> cvt_fp32_to_lowp_fp(
|
| 327 |
+
at::vec::Vectorized<float> src) {
|
| 328 |
+
return at::vec::convert_from_float<scalar_t>(src, src);
|
| 329 |
+
}
|
| 330 |
+
|
| 331 |
+
inline at::vec::Vectorized<float> mask_convert_to_float(at::vec::Vectorized<float> src) {
|
| 332 |
+
auto zeros = at::vec::Vectorized<float>(0);
|
| 333 |
+
auto ones = at::vec::Vectorized<float>(1);
|
| 334 |
+
return at::vec::Vectorized<float>::blendv(zeros, ones, src);
|
| 335 |
+
}
|
| 336 |
+
|
| 337 |
+
template <typename SRC>
|
| 338 |
+
inline at::vec::Vectorized<float> vec_convert_to_mask(at::vec::Vectorized<SRC> src) {
|
| 339 |
+
assert(
|
| 340 |
+
at::vec::Vectorized<float>::size() == at::vec::Vectorized<SRC>::size());
|
| 341 |
+
at::vec::Vectorized<float> res_vec(0);
|
| 342 |
+
__at_align__ float dst_tmp[at::vec::Vectorized<float>::size()];
|
| 343 |
+
__at_align__ SRC src_tmp[at::vec::Vectorized<SRC>::size()];
|
| 344 |
+
src.store(src_tmp);
|
| 345 |
+
|
| 346 |
+
#pragma unroll
|
| 347 |
+
for (int i = 0; i < at::vec::Vectorized<float>::size(); i++) {
|
| 348 |
+
*(uint32_t*)(dst_tmp + i) = src_tmp[i] ? 0xFFFFFFFF : 0;
|
| 349 |
+
}
|
| 350 |
+
|
| 351 |
+
return res_vec.loadu(dst_tmp);
|
| 352 |
+
}
|
| 353 |
+
|
| 354 |
+
template <typename SRC>
|
| 355 |
+
inline at::vec::Vectorized<float> to_float_mask(at::vec::Vectorized<SRC> src) {
|
| 356 |
+
return vec_convert_to_mask(src);
|
| 357 |
+
}
|
| 358 |
+
|
| 359 |
+
template <>
|
| 360 |
+
inline at::vec::Vectorized<float> to_float_mask(at::vec::Vectorized<int> src) {
|
| 361 |
+
#if defined(CPU_CAPABILITY_AVX2)
|
| 362 |
+
return at::vec::Vectorized<float>(_mm256_castsi256_ps(src));
|
| 363 |
+
#else
|
| 364 |
+
return at::vec::Vectorized<float>(_mm512_castsi512_ps(src));
|
| 365 |
+
#endif
|
| 366 |
+
}
|
| 367 |
+
|
| 368 |
+
template <>
|
| 369 |
+
inline at::vec::Vectorized<float> to_float_mask(at::vec::Vectorized<float> src) {
|
| 370 |
+
return src;
|
| 371 |
+
}
|
| 372 |
+
#endif
|
llava_next/lib/python3.10/site-packages/torch/_inductor/codegen/triton.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
llava_next/lib/python3.10/site-packages/torch/_inductor/codegen/triton_foreach.py
ADDED
|
@@ -0,0 +1,234 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import itertools
|
| 2 |
+
from collections import defaultdict
|
| 3 |
+
from dataclasses import dataclass
|
| 4 |
+
from typing import List, Tuple
|
| 5 |
+
|
| 6 |
+
from .. import metrics
|
| 7 |
+
from ..utils import ceildiv
|
| 8 |
+
from ..virtualized import V
|
| 9 |
+
from .common import IndentedBuffer, Kernel
|
| 10 |
+
from .triton import TritonKernel
|
| 11 |
+
from .triton_utils import config_of, signature_to_meta
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
@dataclass
|
| 15 |
+
class PartitionState:
|
| 16 |
+
partitions: List[Tuple]
|
| 17 |
+
cur_partition: List[Tuple]
|
| 18 |
+
cur_count: int
|
| 19 |
+
|
| 20 |
+
def finalize(self):
|
| 21 |
+
if self.cur_partition:
|
| 22 |
+
self.partitions.append(self.cur_partition)
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
class ForeachKernel(Kernel):
|
| 26 |
+
MAX_NUM_ARGS = 250 # number where I would no longer get triton errors
|
| 27 |
+
|
| 28 |
+
@staticmethod
|
| 29 |
+
def _update_partition(partition_state, node_rw_count, node_info):
|
| 30 |
+
if partition_state.cur_count + node_rw_count > ForeachKernel.MAX_NUM_ARGS:
|
| 31 |
+
partition_state.partitions.append(partition_state.cur_partition)
|
| 32 |
+
partition_state.cur_partition = [node_info]
|
| 33 |
+
partition_state.cur_count = node_rw_count
|
| 34 |
+
else:
|
| 35 |
+
partition_state.cur_count += node_rw_count
|
| 36 |
+
partition_state.cur_partition.append(node_info)
|
| 37 |
+
|
| 38 |
+
@staticmethod
|
| 39 |
+
def horizontal_partition(subkernel_nodes, triton_scheduling):
|
| 40 |
+
"""Generates a list of lists of node info tuples which consist of (fused_nodes, tiling, numel, rnumel)
|
| 41 |
+
for each subkernel node where each sublist is guaranteed to not exceed CUDA limits for number of args
|
| 42 |
+
(read/writes) and to have the same 2D or 1D blocking strategy."""
|
| 43 |
+
assert len(subkernel_nodes) >= 1
|
| 44 |
+
|
| 45 |
+
partition_state_1d = PartitionState([], [], 0)
|
| 46 |
+
yelem_to_partition_state_2d = defaultdict(lambda: PartitionState([], [], 0))
|
| 47 |
+
|
| 48 |
+
for node in subkernel_nodes:
|
| 49 |
+
fused_nodes = node.get_nodes()
|
| 50 |
+
_, (numel, rnumel) = max(
|
| 51 |
+
fused_nodes, key=lambda x: int(x.is_reduction())
|
| 52 |
+
).group
|
| 53 |
+
tiled_groups = triton_scheduling.select_tiling(fused_nodes, numel, rnumel)
|
| 54 |
+
node_info = fused_nodes, tiled_groups, numel, rnumel
|
| 55 |
+
|
| 56 |
+
read_writes = node.read_writes
|
| 57 |
+
read_write_count = len(read_writes.reads) + len(read_writes.writes)
|
| 58 |
+
|
| 59 |
+
if tiled_groups[1] == 1:
|
| 60 |
+
ForeachKernel._update_partition(
|
| 61 |
+
partition_state_1d, read_write_count, node_info
|
| 62 |
+
)
|
| 63 |
+
else:
|
| 64 |
+
y_elem = tiled_groups[0]
|
| 65 |
+
partition_state_2d = yelem_to_partition_state_2d[y_elem]
|
| 66 |
+
ForeachKernel._update_partition(
|
| 67 |
+
partition_state_2d, read_write_count, node_info
|
| 68 |
+
)
|
| 69 |
+
|
| 70 |
+
partition_state_1d.finalize()
|
| 71 |
+
all_partitions = partition_state_1d.partitions
|
| 72 |
+
for partition_state_2d in yelem_to_partition_state_2d.values():
|
| 73 |
+
partition_state_2d.finalize()
|
| 74 |
+
all_partitions.extend(partition_state_2d.partitions)
|
| 75 |
+
|
| 76 |
+
return all_partitions
|
| 77 |
+
|
| 78 |
+
def __init__(self):
|
| 79 |
+
super().__init__()
|
| 80 |
+
self.blocking_2d = False
|
| 81 |
+
self.block_size_1d = 1024 # Try tuning this value
|
| 82 |
+
self.block_size_2d = 32
|
| 83 |
+
self.num_warps = 8
|
| 84 |
+
self.sub_kernels = []
|
| 85 |
+
self.iter_vars_count = itertools.count()
|
| 86 |
+
self.x_block_count = 0
|
| 87 |
+
self.y_block_count = 0
|
| 88 |
+
|
| 89 |
+
def get_block_size(self):
|
| 90 |
+
if self.blocking_2d:
|
| 91 |
+
return self.block_size_2d
|
| 92 |
+
else:
|
| 93 |
+
return self.block_size_1d
|
| 94 |
+
|
| 95 |
+
@staticmethod
|
| 96 |
+
def codegen_pid_offsets(code, block_count, lower_bound, prefix):
|
| 97 |
+
if block_count == 0:
|
| 98 |
+
code.splice(f"{prefix}pid_offset = {prefix}pid")
|
| 99 |
+
else:
|
| 100 |
+
code.splice(f"{prefix}pid_offset = {prefix}pid - {lower_bound}")
|
| 101 |
+
|
| 102 |
+
def codegen_pid_range(self, code, x_elems):
|
| 103 |
+
num_x_blocks = ceildiv(x_elems, self.get_block_size())
|
| 104 |
+
upper_bound_x_pid = self.x_block_count + num_x_blocks
|
| 105 |
+
lower_bound_x_pid = self.x_block_count
|
| 106 |
+
|
| 107 |
+
if self.x_block_count == 0:
|
| 108 |
+
cond = "if"
|
| 109 |
+
else:
|
| 110 |
+
cond = "elif"
|
| 111 |
+
|
| 112 |
+
x_pid_bounds_check = (
|
| 113 |
+
f"xpid >= {lower_bound_x_pid} and xpid < {upper_bound_x_pid}"
|
| 114 |
+
)
|
| 115 |
+
code.splice(f"{cond} {x_pid_bounds_check}:")
|
| 116 |
+
|
| 117 |
+
with code.indent():
|
| 118 |
+
ForeachKernel.codegen_pid_offsets(
|
| 119 |
+
code, num_x_blocks, lower_bound_x_pid, "x"
|
| 120 |
+
)
|
| 121 |
+
self.x_block_count += num_x_blocks
|
| 122 |
+
|
| 123 |
+
def create_sub_kernel(self, *groups, index_dtype, mutations, reduction_hint):
|
| 124 |
+
sub_kernel = TritonKernel(
|
| 125 |
+
*groups,
|
| 126 |
+
index_dtype=index_dtype,
|
| 127 |
+
mutations=mutations,
|
| 128 |
+
pid_cache={
|
| 129 |
+
"tl.program_id(0)": "xpid_offset",
|
| 130 |
+
"tl.program_id(1)": "ypid",
|
| 131 |
+
},
|
| 132 |
+
reduction_hint=reduction_hint,
|
| 133 |
+
)
|
| 134 |
+
if self.blocking_2d:
|
| 135 |
+
assert len(groups) == 3
|
| 136 |
+
|
| 137 |
+
self.blocking_2d |= groups[1] != 1 and len(groups) == 3
|
| 138 |
+
metrics.generated_kernel_count -= 1
|
| 139 |
+
sub_kernel.args = self.args
|
| 140 |
+
sub_kernel.iter_vars_count = self.iter_vars_count
|
| 141 |
+
sub_kernel.cse.iter_buffer_ids = self.cse.iter_buffer_ids
|
| 142 |
+
self.sub_kernels.append(sub_kernel)
|
| 143 |
+
return sub_kernel
|
| 144 |
+
|
| 145 |
+
def jit_line(self):
    """Build the decorator lines (@foreach + @triton.jit) for the fused kernel.

    A 32-bit index type is chosen only when every sub-kernel can use tl.int32.
    Returns the two decorator lines joined by a newline.
    """
    can_use_32bit = all(k.index_dtype == "tl.int32" for k in self.sub_kernels)
    index_dtype = "tl.int32" if can_use_32bit else "tl.int64"
    _, _, signature = self.args.python_argdefs()
    triton_meta = {
        # BUG FIX: size_dtype must be the dtype string ("tl.int32"/"tl.int64"),
        # not the bool flag — signature_of() raises NotImplementedError for
        # any SizeArg when given a non-dtype value.
        "signature": signature_to_meta(signature, size_dtype=index_dtype),
        "device": V.graph.scheduler.current_device.index,
        "device_type": V.graph.scheduler.current_device.type,
        "constants": {},
    }
    triton_meta["configs"] = [config_of(signature)]
    return (
        f"@foreach(num_warps={self.num_warps}, meta={triton_meta!r})\n"
        + "@triton.jit"
    )
|
| 160 |
+
|
| 161 |
+
def grid(self):
    """Return the (x, y, z) launch grid for the fused kernel.

    The y dimension is only used under 2D blocking, sized from the first
    sub-kernel's leading numel; z is always 1.
    """
    if self.blocking_2d:
        y_blocks = ceildiv(int(self.sub_kernels[0].numels[0]), self.block_size_2d)
    else:
        y_blocks = 1
    return (self.x_block_count, y_blocks, 1)
|
| 169 |
+
|
| 170 |
+
def codegen_kernel(self, name=None):
    """Generate the full source of the fused foreach triton kernel.

    Emits the imports, the jit decorators, the def line, per-sub-kernel
    pid dispatch (if/elif chain), each sub-kernel's body, and a trailing
    else: pass so out-of-range pids do nothing. Returns the source string.
    """
    code = IndentedBuffer()

    code.splice(
        """
        import triton
        import triton.language as tl
        from torch._inductor.triton_heuristics import foreach
        from torch._inductor.utils import instance_descriptor
        from torch._inductor import triton_helpers
        """
    )
    argdefs, _, _ = self.args.python_argdefs()
    code.writeline(self.jit_line())
    code.writeline(f"def {name or 'KERNEL_NAME'}({', '.join(argdefs)}):")

    with code.indent():
        code.splice("xpid = tl.program_id(0)")
        if self.blocking_2d:
            code.splice("ypid = tl.program_id(1)")
            code.splice(f"XBLOCK: tl.constexpr = {self.block_size_2d}")
            code.splice(f"YBLOCK: tl.constexpr = {self.block_size_2d}")
        else:
            code.splice(f"XBLOCK: tl.constexpr = {self.block_size_1d}")

        for sub_kernel in self.sub_kernels:
            assert len(sub_kernel.numels) <= 3
            # TODO mlazos: support dynamic shapes
            # Under 2D blocking, the x extent is the second numel entry.
            numel_ind = 0 if not self.blocking_2d else 1
            self.codegen_pid_range(code, int(sub_kernel.numels[numel_ind]))
            with code.indent():
                if self.blocking_2d:
                    code.splice(f"ynumel = {sub_kernel.numels[0]}")
                    code.splice(f"xnumel = {sub_kernel.numels[1]}")
                else:
                    code.splice(f"xnumel = {sub_kernel.numels[0]}")

                sub_kernel.codegen_body()
                code.splice(sub_kernel.body)

        # Pids beyond the last sub-kernel's range fall through to a no-op.
        code.splice("else:")
        with code.indent():
            code.splice("pass")

    return code.getvalue()
|
| 215 |
+
|
| 216 |
+
def call_kernel(self, code, name: str):
    """Emit the call site (Python or C++ wrapper) for this foreach kernel."""
    _, call_args, _ = self.args.python_argdefs()
    # dynamo wraps unspec variable as 0d CPU tensor, need convert to scalar
    for idx, arg in enumerate(call_args):
        if V.graph.is_unspec_arg(arg):
            call_args[idx] = arg + ".item()"
    if V.graph.cpp_wrapper:
        V.graph.wrapper_code.generate_kernel_call(
            name, call_args, device_index=V.graph.scheduler.current_device.index
        )
        return
    # TODO: refactor generate_kernel_call
    joined_args = ", ".join(call_args)
    stream_name = code.write_get_cuda_stream(
        V.graph.scheduler.current_device.index
    )
    code.writeline(
        f"{name}.run({joined_args}, grid=({self.grid()}), stream={stream_name})"
    )
|
llava_next/lib/python3.10/site-packages/torch/_inductor/codegen/triton_utils.py
ADDED
|
@@ -0,0 +1,73 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .. import config
|
| 2 |
+
from ..utils import instance_descriptor
|
| 3 |
+
from ..virtualized import V
|
| 4 |
+
from .common import SizeArg, TensorArg
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
def signature_of(arg, *, size_dtype: str):
    """Return the triton signature string for one kernel argument.

    Tensor args map to pointer types ("*fp32", ...); unspec 0d tensors are
    passed as scalars (with fp16/bf16 widened to fp32). Size args map to
    "i32"/"i64" according to size_dtype.
    """
    from triton.runtime.jit import JITFunction

    if isinstance(arg, TensorArg):
        tye = JITFunction._type_of(arg.dtype)
        if not V.graph.is_unspec_arg(arg.buffer):
            return tye
        # had unwrapped 0d tensor as scalar
        scalar_tye = tye.lstrip("*")
        # Half-precision scalars are promoted to fp32 at the call boundary.
        return "fp32" if scalar_tye in ("fp16", "bf16") else scalar_tye
    if isinstance(arg, SizeArg):
        if size_dtype == "tl.int32":
            return "i32"
        if size_dtype == "tl.int64":
            return "i64"
        raise NotImplementedError(f"unhandled size_dtype {size_dtype}")
    raise NotImplementedError(f"unhandled {type(arg)}: {arg}")
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def signature_to_meta(signature, *, size_dtype: str):
    """Map each argument position to its triton signature string."""
    meta = {}
    for pos, arg in enumerate(signature):
        meta[pos] = signature_of(arg, size_dtype=size_dtype)
    return meta
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
def config_of(args):
    """Build the triton instance_descriptor for a kernel signature.

    Records which argument positions are statically known to be divisible
    by 16 (tensor alignment / size multiples of ALIGNMENT) and which size
    args are divisible by 8, mirroring triton's own specialization logic.
    """
    from ..compile_fx import ALIGNMENT

    def is_aligned(x):
        # Tensors: aligned unless the graph marked their buffer unaligned.
        if isinstance(x, TensorArg):
            return x.buffer not in V.graph.unaligned_buffers
        if isinstance(x, SizeArg):
            # TODO(voz): These are kinda redundant, if we can solve out statically_known_multiple_of with
            # _maybe_evaluate_static...
            if x.name.startswith("load_seed_offset"):
                return False
            else:
                return V.graph.sizevars.statically_known_multiple_of(x.expr, ALIGNMENT)
        raise NotImplementedError(f"unhandled {type(x)}: {x}")

    def is_aligned_8(x):
        """
        Roughly follow triton code here:
        https://github.com/openai/triton/blob/5282ed890d453e10b9ee30076ef89115dd197761/python/triton/runtime/jit.py#L208-L222
        """
        # Pointer args never participate in the divisible-by-8 set here.
        if isinstance(x, TensorArg):
            return False
        if isinstance(x, SizeArg):
            # TODO(voz): These are kinda redundant, if we can solve out statically_known_multiple_of with
            # _maybe_evaluate_static...
            if x.name.startswith("load_seed_offset"):
                return False
            else:
                return V.graph.sizevars.statically_known_multiple_of(x.expr, 8)
        raise NotImplementedError(f"unhandled {type(x)}: {x}")

    if config.triton.divisible_by_16:
        divisible_by_16 = tuple(i for i, arg in enumerate(args) if is_aligned(arg))
    else:
        divisible_by_16 = ()
    divisible_by_8 = tuple(i for i, arg in enumerate(args) if is_aligned_8(arg))
    return instance_descriptor(divisible_by_16, (), (), divisible_by_8)
|
llava_next/lib/python3.10/site-packages/torch/_inductor/codegen/wrapper.py
ADDED
|
@@ -0,0 +1,1505 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import collections
|
| 2 |
+
import contextlib
|
| 3 |
+
import dataclasses
|
| 4 |
+
import functools
|
| 5 |
+
import os
|
| 6 |
+
import re
|
| 7 |
+
from itertools import count
|
| 8 |
+
from typing import Any, Dict, List, Optional, Tuple
|
| 9 |
+
|
| 10 |
+
import sympy
|
| 11 |
+
from sympy import Expr
|
| 12 |
+
|
| 13 |
+
import torch
|
| 14 |
+
from torch._dynamo.utils import counters, dynamo_timed
|
| 15 |
+
from torch.fx.experimental.symbolic_shapes import SymTypes
|
| 16 |
+
from torch.fx.node import _get_qualified_name
|
| 17 |
+
|
| 18 |
+
from .. import codecache, config, ir
|
| 19 |
+
from ..codecache import CudaKernelParamCache
|
| 20 |
+
from ..utils import (
|
| 21 |
+
cache_on_self,
|
| 22 |
+
get_benchmark_name,
|
| 23 |
+
LineContext,
|
| 24 |
+
sympy_dot,
|
| 25 |
+
sympy_product,
|
| 26 |
+
)
|
| 27 |
+
from ..virtualized import V
|
| 28 |
+
from .common import CodeGen, DeferredLine, IndentedBuffer, PythonPrinter
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
pexpr = PythonPrinter().doprint
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def buffer_reuse_key(node: ir.Buffer):
    """Key grouping buffers whose storage can be reused for one another.

    Buffers with equal keys share device, dtype, element count, and the same
    extent of addressable storage, so a freed one can back a new allocation.
    """
    size = node.get_size()
    stride = node.get_stride()
    # Index of the last addressable element under (size, stride); captures
    # storage gaps introduced by non-contiguous strides.
    last_element = sympy_dot([s - 1 for s in size], stride)
    return (
        node.get_device(),
        node.get_dtype(),
        V.graph.sizevars.simplify(sympy_product(size)),
        # Detect gaps in tensor storage caused by strides
        V.graph.sizevars.size_hint(last_element),
    )
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
def is_int(s: str) -> bool:
    """Return True when *s* parses as a Python int literal."""
    try:
        int(s)
        return True
    except ValueError:
        return False
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
def is_float(s: str) -> bool:
    """Return True when *s* parses as a Python float literal."""
    try:
        float(s)
        return True
    except ValueError:
        return False
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
def convert_arg_type(python_type):
    """Translate a python schema type string into its C++ argument type.

    Tensors become const references; plain types go through PYTHON_TO_CPP;
    container types such as Optional[T] go through CONTAINER_PYTHON_TO_CPP.

    Raises:
        AssertionError: when no C++ mapping is known for the type.
    """
    from .cpp import CONTAINER_PYTHON_TO_CPP, PYTHON_TO_CPP

    if python_type == "Tensor":
        # Conversions rules follow https://github.com/pytorch/pytorch/tree/main/aten/src/ATen/native#func
        return f"at::{python_type} const&"

    if python_type in PYTHON_TO_CPP:
        return PYTHON_TO_CPP[python_type]

    # Convert args of container types e.g. Optional[*]
    for py_container, cpp_container in CONTAINER_PYTHON_TO_CPP.items():
        container_match = re.findall(py_container + r"\[([a-zA-Z_]+)]", python_type)
        if len(container_match) == 1:
            contained_type = container_match[0]
            assert (
                contained_type in PYTHON_TO_CPP
            ), f"unsupported {py_container} type in convert_arg_type: {contained_type}"
            cpp_contained_type = PYTHON_TO_CPP[contained_type]
            return f"{cpp_container}<{cpp_contained_type}>"

    # Fixed typo in the error message: "unsupport" -> "unsupported".
    raise AssertionError(f"unsupported python_type: {python_type}")
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
def convert_return_type(python_type):
    """Translate a python schema return type into its C++ return type.

    Only plain Tensor returns are supported for the cpp wrapper today.
    """
    # TODO: only support Tensor as func return type for now
    # TODO: support alias
    assert python_type == "Tensor", (
        f"only support tensor output for cpp_wrapper, but receive type {python_type}"
    )
    return f"at::{python_type}"
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
def get_cpp_op_schema(kernel):
    """Render kernel._schema as a C++ function type string "Ret(Type name, ...)"."""
    # use x.real_type instead of x.type so that we get ScalarType instead of int
    arg_types = [repr(a.real_type) for a in kernel._schema.arguments]
    arg_names = [a.name for a in kernel._schema.arguments]
    # TODO: only support len(returns) == 1 for now.
    returns = [repr(r.type) for r in kernel._schema.returns]
    assert (
        len(returns) == 1
    ), f"only support 1 single output for cpp_wrapper, but {kernel.__name__} has {len(returns)} outputs"
    cpp_return_value = convert_return_type(returns[0])

    cpp_arg_type = [
        f"{convert_arg_type(arg_type)} {arg_name}"
        for arg_type, arg_name in zip(arg_types, arg_names)
    ]
    return f"{cpp_return_value}({', '.join(cpp_arg_type)})"
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
@dataclasses.dataclass
class SymbolicCallArg:
    """Marks a kernel call argument as a symbolic (expression-derived) value."""

    # The wrapped value; stringified verbatim when emitted into generated code.
    inner: Any

    def __str__(self):
        return f"{self.inner}"
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
class MemoryPlanningState:
    """Pool of freed-but-not-yet-reused buffers, keyed by buffer_reuse_key."""

    def __init__(self):
        super().__init__()
        # Maps a reuse key to a LIFO stack of free lines available for reuse.
        self.reuse_pool: Dict[Any, List[FreeIfNotReusedLine]] = collections.defaultdict(
            list
        )

    def __contains__(self, key):
        # Present only when at least one candidate remains under the key.
        candidates = self.reuse_pool.get(key)
        return bool(candidates)

    def pop(self, key) -> "FreeIfNotReusedLine":
        """Take the most recently freed candidate for *key*."""
        candidate = self.reuse_pool[key].pop()
        assert not candidate.is_reused
        return candidate

    def push(self, key, item: "FreeIfNotReusedLine"):
        """Offer a freed buffer line for later reuse under *key*."""
        assert not item.is_reused
        self.reuse_pool[key].append(item)
|
| 141 |
+
|
| 142 |
+
|
| 143 |
+
@dataclasses.dataclass
class EnterCudaDeviceContextManagerLine:
    """Deferred line that switches the active CUDA device in generated code."""

    # Target CUDA device index.
    device_idx: int
    # True only for the first guard emitted; later guards reuse the first.
    first_time: bool

    def codegen(self, code: IndentedBuffer, device_cm_stack: contextlib.ExitStack):
        if V.graph.cpp_wrapper:
            code.writeline("\n")
            if V.graph.aot_mode:
                # In AOT mode, we have a stream provided as a param. A stream is
                # associated with a device, so we never expect the device to change.
                assert self.first_time
                # CUDAStreamGuard sets the stream and the device.
                code.writeline(
                    f"at::cuda::CUDAStreamGuard stream_guard("
                    f"at::cuda::getStreamFromExternal(stream, {self.device_idx}));"
                )
            else:
                if self.first_time:
                    code.writeline(
                        f"at::cuda::CUDAGuard device_guard({self.device_idx});"
                    )
                else:
                    # Reuse the guard created on the first device switch.
                    code.writeline(f"device_guard.set_index({self.device_idx});")
        else:
            # Note _DeviceGuard has less overhead than device, but only accepts
            # integers
            code.writeline(f"with torch.cuda._DeviceGuard({self.device_idx}):")
            device_cm_stack.enter_context(code.indent())
            code.writeline(
                f"torch.cuda.set_device({self.device_idx}) # no-op to ensure context"
            )
|
| 175 |
+
|
| 176 |
+
|
| 177 |
+
class ExitCudaDeviceContextManagerLine:
    """Closes the Python device context opened by the matching enter line."""

    def codegen(self, code: IndentedBuffer, device_cm_stack: contextlib.ExitStack):
        # The C++ wrapper relies on RAII guards, so nothing to emit there.
        if V.graph.cpp_wrapper:
            return
        device_cm_stack.close()
|
| 181 |
+
|
| 182 |
+
|
| 183 |
+
@dataclasses.dataclass
class MemoryPlanningLine:
    """Base class for deferred buffer lines, processed in two passes."""

    wrapper: "WrapperCodeGen"

    def plan(self, state: MemoryPlanningState) -> "MemoryPlanningLine":
        """First pass to find reuse"""
        return self

    def codegen(self, code: IndentedBuffer):
        """Second pass to output code"""
|
| 194 |
+
|
| 195 |
+
|
| 196 |
+
@dataclasses.dataclass
class AllocateLine(MemoryPlanningLine):
    """Deferred allocation of `node`; planning may turn it into a ReuseLine."""

    node: ir.Buffer
    can_reuse: bool = True

    def plan(self, state: MemoryPlanningState):
        # Buffer was eliminated by other passes: emit nothing.
        if self.node.get_name() in V.graph.removed_buffers:
            return NullLine(self.wrapper)

        # try to reuse a recently freed buffer
        key = buffer_reuse_key(self.node)
        if config.allow_buffer_reuse and key in state and self.can_reuse:
            free_line = state.pop(key)
            free_line.is_reused = True
            return ReuseLine(self.wrapper, free_line.node, self.node)

        return self

    def codegen(self, code: IndentedBuffer):
        assert self.node.get_name() not in V.graph.removed_buffers
        line = self.wrapper.make_buffer_allocation(self.node)
        code.writeline(line)
|
| 218 |
+
|
| 219 |
+
|
| 220 |
+
@dataclasses.dataclass
class FreeIfNotReusedLine(MemoryPlanningLine):
    """Deferred free of `node`; skipped if a later allocation reuses its storage."""

    node: ir.Buffer
    # Set to True by AllocateLine.plan when another buffer takes over the storage.
    is_reused: bool = False

    def plan(self, state: MemoryPlanningState):
        assert not self.is_reused
        if self.node.get_name() in V.graph.removed_buffers:
            return NullLine(self.wrapper)
        # Offer this buffer's storage to later allocations with a matching key.
        if config.allow_buffer_reuse:
            state.push(buffer_reuse_key(self.node), self)
        return self

    def codegen(self, code: IndentedBuffer):
        assert self.node.get_name() not in V.graph.removed_buffers
        if not self.is_reused:
            code.writeline(self.wrapper.make_buffer_free(self.node))
|
| 237 |
+
|
| 238 |
+
|
| 239 |
+
@dataclasses.dataclass
class ReuseLine(MemoryPlanningLine):
    """Deferred line rebinding `node`'s storage to serve as `reused_as`."""

    node: ir.Buffer
    reused_as: ir.Buffer

    def plan(self, state: MemoryPlanningState):
        # Either both buffers were removed, or neither was.
        if self.node.get_name() in V.graph.removed_buffers:
            assert self.reused_as.get_name() in V.graph.removed_buffers
            return NullLine(self.wrapper)
        assert self.reused_as.get_name() not in V.graph.removed_buffers
        return self

    def codegen(self, code: IndentedBuffer):
        assert self.node.get_name() not in V.graph.removed_buffers
        assert self.reused_as.get_name() not in V.graph.removed_buffers
        code.writeline(
            self.wrapper.make_buffer_reuse(
                self.node,
                self.reused_as,
            )
        )
|
| 260 |
+
|
| 261 |
+
|
| 262 |
+
class NullLine(MemoryPlanningLine):
    """Placeholder for a planning line eliminated during the plan() pass."""

    pass
|
| 264 |
+
|
| 265 |
+
|
| 266 |
+
class WrapperCodeGen(CodeGen):
|
| 267 |
+
"""
|
| 268 |
+
Generate outer wrapper in Python that calls the kernels.
|
| 269 |
+
"""
|
| 270 |
+
|
| 271 |
+
def __init__(self):
    """Set up the buffers, naming state, and syntax strings for Python output.

    The various string attributes (declare/ending/brackets/...) are the
    Python spellings; C++ wrapper subclasses presumably override them —
    TODO confirm against the subclass definitions.
    """
    super().__init__()
    self._names_iter = count()
    # Output sections: file header, pre-call setup, and the call body itself.
    self.header = IndentedBuffer()
    self.prefix = IndentedBuffer()
    self.wrapper_call = IndentedBuffer()
    self.src_to_kernel = {}
    # NOTE(review): "kenel" looks like a typo for "kernel", but other code
    # may reference this attribute name — do not rename without checking callers.
    self.kenel_numel_expr = set()
    self.lines = []
    self.declare = ""
    self.ending = ""
    self.open_bracket = "["
    self.closed_bracket = "]"
    self.comment = "#"
    self.namespace = ""
    self.none_str = "None"
    self.optional_tensor_str = "None"
    self.size = "size()"
    self.stride = "stride()"
    self.first_device_guard = True
    self.supports_intermediate_hooks = True
    self.expr_printer = pexpr

    self.write_header()
    self.write_prefix()

    for name, hashed in V.graph.constant_reprs.items():
        # include a hash so our code cache gives different constants different files
        self.write_constant(name, hashed)

    self.allocated = set()
    self.freed = set()

    # maps from reusing buffer to reused buffer
    self.reuses = dict()

    # Memoize per-index stream lookups so each stream line is emitted once.
    self.write_get_cuda_stream = functools.lru_cache(None)(  # type: ignore[assignment]
        self.write_get_cuda_stream
    )

    @functools.lru_cache(None)
    def add_import_once(line):
        # Each distinct import line is written to the header at most once.
        self.header.writeline(line)

    self.add_import_once = add_import_once
    self._metas = {}
|
| 317 |
+
|
| 318 |
+
def write_constant(self, name, hashed):
    """Emit a placeholder for a graph constant; the hash keeps cache keys distinct."""
    placeholder = f"{name} = None # {hashed}"
    self.header.writeline(placeholder)
|
| 320 |
+
|
| 321 |
+
def write_header(self):
    """Emit the fixed import preamble shared by all generated wrapper modules."""
    self.header.splice(
        f"""
        from ctypes import c_void_p, c_long
        import torch
        import math
        import random
        import os
        import tempfile
        from math import inf, nan
        from torch._inductor.hooks import run_intermediate_hooks
        from torch._inductor.utils import maybe_profile

        from torch import empty_strided, device
        from {codecache.__name__} import AsyncCompile
        from torch._inductor.select_algorithm import extern_kernels

        aten = torch.ops.aten
        assert_size_stride = torch._C._dynamo.guards.assert_size_stride
        reinterpret_tensor = torch.ops.inductor._reinterpret_tensor
        async_compile = AsyncCompile()

        """
    )
|
| 345 |
+
|
| 346 |
+
@cache_on_self
def write_triton_header_once(self):
    """Emit triton-specific imports once, the first time a triton kernel is used."""
    self.header.splice(
        """
        import triton
        import triton.language as tl
        from torch._inductor.triton_heuristics import grid, start_graph, end_graph
        from torch._C import _cuda_getCurrentRawStream as get_cuda_stream
        """
    )
|
| 356 |
+
|
| 357 |
+
def add_meta_once(self, meta):
    """Intern a kernel meta object; returns the shared header variable name."""
    key = repr(meta)
    if key not in self._metas:
        var_name = f"meta{len(self._metas)}"
        self._metas[key] = var_name
        self.header.writeline(f"{var_name} = {key}")
    return self._metas[key]
|
| 364 |
+
|
| 365 |
+
@cache_on_self
def get_output_refs(self):
    """Return (cached) codegen references for every graph output."""
    return [x.codegen_reference() for x in V.graph.graph_outputs]
|
| 368 |
+
|
| 369 |
+
def mark_output_type(self):
    """Hook for subclasses to record output types; no-op for the Python wrapper."""
    return
|
| 371 |
+
|
| 372 |
+
def codegen_input_size_asserts(self):
    """Emit assert_size_stride checks for each tensor graph input."""
    for name, buf in V.graph.graph_inputs.items():
        # Symbolic scalar inputs have no size/stride to assert.
        if isinstance(buf, sympy.Expr):
            continue

        # comparing strides for 0 size tensor is tricky. Ignore them for now.
        if sympy_product(buf.get_size()) == 0:
            continue
        size = self.codegen_shape_tuple(buf.get_size())
        stride = self.codegen_shape_tuple(buf.get_stride())
        self.prefix.writeline(f"assert_size_stride({name}, {size}, {stride})")
|
| 383 |
+
|
| 384 |
+
def write_prefix(self):
    """Emit the call(args) entry point: arg unpacking, input setup, size asserts."""
    self.prefix.splice(
        """

        async_compile.wait(globals())
        del async_compile

        def call(args):
        """
    )
    with self.prefix.indent():
        if config.triton.debug_sync_graph:
            self.prefix.writeline("torch.cuda.synchronize()")
        inp_len = len(V.graph.graph_inputs.keys())
        if inp_len != 0:
            # Trailing comma needed so a single name still unpacks a 1-tuple.
            lhs = f"{', '.join(V.graph.graph_inputs.keys())}{'' if inp_len != 1 else ','}"
            self.prefix.writeline(f"{lhs} = args")
            # Drop the caller's references so inputs can be freed eagerly.
            self.prefix.writeline("args.clear()")

        self.codegen_inputs(self.prefix, V.graph.graph_inputs)
        if config.size_asserts:
            self.codegen_input_size_asserts()
|
| 406 |
+
|
| 407 |
+
def write_get_cuda_stream(self, index):
    """Emit a stream lookup for *index* (memoized via lru_cache in __init__)."""
    self.write_triton_header_once()
    stream_var = f"stream{index}"
    self.writeline(f"{stream_var} = get_cuda_stream({index})")
    return stream_var
|
| 412 |
+
|
| 413 |
+
def next_kernel_suffix(self):
    """Return the next unique numeric suffix for a kernel name."""
    return str(next(self._names_iter))
|
| 415 |
+
|
| 416 |
+
def codegen_device_guard_enter(self, device_idx):
|
| 417 |
+
self.writeline(
|
| 418 |
+
EnterCudaDeviceContextManagerLine(device_idx, self.first_device_guard)
|
| 419 |
+
)
|
| 420 |
+
self.first_device_guard = False
|
| 421 |
+
|
| 422 |
+
def codegen_device_guard_exit(self):
|
| 423 |
+
self.writeline(ExitCudaDeviceContextManagerLine())
|
| 424 |
+
|
| 425 |
+
def generate_return(self, output_refs):
|
| 426 |
+
if output_refs:
|
| 427 |
+
self.wrapper_call.writeline("return (" + ", ".join(output_refs) + ", )")
|
| 428 |
+
else:
|
| 429 |
+
self.wrapper_call.writeline("return ()")
|
| 430 |
+
|
| 431 |
+
def generate_end(self, result):
|
| 432 |
+
return
|
| 433 |
+
|
| 434 |
+
def generate_extern_kernel_alloc(self, output_name, kernel, args, origin_node):
|
| 435 |
+
self.writeline(
|
| 436 |
+
f"{self.declare}{output_name} = {kernel}({', '.join(args)}){self.ending}"
|
| 437 |
+
)
|
| 438 |
+
if (
|
| 439 |
+
self.supports_intermediate_hooks
|
| 440 |
+
and config.generate_intermediate_hooks
|
| 441 |
+
and origin_node is not None
|
| 442 |
+
):
|
| 443 |
+
counters["inductor"]["intermediate_hooks"] += 1
|
| 444 |
+
self.writeline(
|
| 445 |
+
f"run_intermediate_hooks({origin_node.name!r}, {output_name})"
|
| 446 |
+
)
|
| 447 |
+
|
| 448 |
+
def generate_extern_kernel_out(self, output_view, codegen_reference, args, kernel):
|
| 449 |
+
if output_view:
|
| 450 |
+
args.append(f"out={output_view.codegen_reference()}")
|
| 451 |
+
else:
|
| 452 |
+
args.append(f"out={codegen_reference}")
|
| 453 |
+
self.writeline(f"{kernel}({', '.join(args)})")
|
| 454 |
+
|
| 455 |
+
def generate_scatter_fallback(
|
| 456 |
+
self, output, inputs, kernel, fn, src_is_tensor, reduce, kwargs
|
| 457 |
+
):
|
| 458 |
+
line = f"{kernel}({','.join(map(str, inputs))}"
|
| 459 |
+
if kernel == "aten.scatter_":
|
| 460 |
+
if reduce:
|
| 461 |
+
line += f", reduce={repr(reduce)}"
|
| 462 |
+
else:
|
| 463 |
+
line += ", ".join([""] + kwargs)
|
| 464 |
+
line += f"){self.ending}"
|
| 465 |
+
self.writeline(line)
|
| 466 |
+
|
| 467 |
+
def generate_extern_kernel_alloc_and_find_schema_if_needed(
|
| 468 |
+
self,
|
| 469 |
+
name,
|
| 470 |
+
kernel,
|
| 471 |
+
codegen_args,
|
| 472 |
+
cpp_op_schema,
|
| 473 |
+
cpp_kernel_key,
|
| 474 |
+
cpp_kernel_overload_name="",
|
| 475 |
+
):
|
| 476 |
+
self.writeline(f"{name} = {kernel}({', '.join(codegen_args)})")
|
| 477 |
+
|
| 478 |
+
    @dynamo_timed
    def generate(self):
        """Assemble the final wrapper source: header, prefix, planned body lines,
        return statement, and optional benchmark harness.

        Memory planning runs in two passes over self.lines before emission.
        Returns the generated source plus its line map (getvaluewithlinemap).
        """
        result = IndentedBuffer()
        result.splice(self.header)

        out_names = V.graph.get_output_names()
        with contextlib.ExitStack() as stack:
            stack.enter_context(self.wrapper_call.indent())
            if config.profiler_mark_wrapper_call:
                self.generate_profiler_mark_wrapper_call(stack)
            if config.profile_bandwidth:
                self.write_triton_header_once()
                self.wrapper_call.writeline("start_graph()")

            # drop trailing planning lines that don't feed a graph output
            while (
                self.lines
                and isinstance(self.lines[-1], MemoryPlanningLine)
                # TODO: this seems legit, NullLine has no node
                and self.lines[-1].node.name not in out_names  # type: ignore[attr-defined]
            ):
                # these lines will be pointless
                self.lines.pop()

            # codegen allocations in two passes
            planning_state = MemoryPlanningState()
            for i in range(len(self.lines)):
                if isinstance(self.lines[i], MemoryPlanningLine):
                    self.lines[i] = self.lines[i].plan(planning_state)

            # second pass: emit each planned line into the wrapper body
            device_cm_stack = contextlib.ExitStack()
            for line in self.lines:
                if isinstance(line, MemoryPlanningLine):
                    line.codegen(self.wrapper_call)
                elif isinstance(
                    line,
                    (
                        EnterCudaDeviceContextManagerLine,
                        ExitCudaDeviceContextManagerLine,
                    ),
                ):
                    line.codegen(self.wrapper_call, device_cm_stack)
                else:
                    self.wrapper_call.writeline(line)

            output_refs = self.get_output_refs()
            self.mark_output_type()
            if config.triton.debug_sync_graph:
                self.wrapper_call.writeline("torch.cuda.synchronize()")

            if config.profile_bandwidth:
                self.wrapper_call.writeline("end_graph()")

            self.generate_return(output_refs)

        self.append_precomputed_sizes_to_prefix()
        result.splice(self.prefix)

        with result.indent():
            result.splice(self.wrapper_call)

        self.generate_end(result)

        self.add_benchmark_harness(result)

        return result.getvaluewithlinemap()
|
| 543 |
+
|
| 544 |
+
    def codegen_inputs(self, code: IndentedBuffer, graph_inputs: Dict[str, ir.Buffer]):
        """Assign all symbolic shapes to locals"""

        # lru_cache ensures the `<name>_size` / `<name>_stride` binding is
        # emitted at most once per input, on first use.
        @functools.lru_cache(None)
        def sizeof(name):
            code.writeline(
                f"{self.declare}{name}_size = {name}.{self.size}{self.ending}"
            )
            return f"{name}_size"

        @functools.lru_cache(None)
        def strideof(name):
            code.writeline(
                f"{self.declare}{name}_stride = {name}.{self.stride}{self.ending}"
            )
            return f"{name}_stride"

        # Assign all symbolic shapes needed to local variables
        needed = set(V.graph.sizevars.var_to_val.keys()) - set(
            V.graph.sizevars.replacements.keys()
        )

        def is_expr(x):
            # x is a (name, value) pair; symbolic-int inputs carry a sympy.Expr
            return isinstance(x[1], sympy.Expr)

        graph_inputs_expr = list(filter(is_expr, graph_inputs.items()))
        graph_inputs_tensors = list(
            filter(lambda x: not is_expr(x), graph_inputs.items())
        )

        # symbolic-int inputs bind their symbol directly to the argument
        for name, shape in graph_inputs_expr:
            shape = V.graph.sizevars.simplify(shape)
            if shape in needed:
                needed.remove(shape)
                code.writeline(f"{self.declare}{shape} = {name}{self.ending}")

        # remaining symbols are pulled out of tensor sizes ...
        for name, value in graph_inputs_tensors:
            shapes = value.get_size()
            for dim, shape in enumerate(shapes):
                shape = V.graph.sizevars.simplify(shape)
                if shape in needed:
                    needed.remove(shape)
                    code.writeline(
                        f"{self.declare}{shape} = {sizeof(name)}[{dim}]{self.ending}"
                    )

        # ... and, failing that, out of tensor strides
        for name, value in graph_inputs_tensors:
            shapes = value.get_stride()
            for dim, shape in enumerate(shapes):
                shape = V.graph.sizevars.simplify(shape)
                if shape in needed:
                    needed.remove(shape)
                    code.writeline(
                        f"{self.declare}{shape} = {strideof(name)}[{dim}]{self.ending}"
                    )
|
| 599 |
+
|
| 600 |
+
def append_precomputed_sizes_to_prefix(self):
|
| 601 |
+
with self.prefix.indent():
|
| 602 |
+
for sym, expr in V.graph.sizevars.inv_precomputed_replacements.items():
|
| 603 |
+
self.prefix.writeline(
|
| 604 |
+
f"{self.declare}{sym} = {self.expr_printer(expr)}{self.ending}"
|
| 605 |
+
)
|
| 606 |
+
|
| 607 |
+
def codegen_python_sizevar(self, x: Expr) -> str:
|
| 608 |
+
return pexpr(V.graph.sizevars.simplify(x))
|
| 609 |
+
|
| 610 |
+
def codegen_sizevar(self, x: Expr) -> str:
|
| 611 |
+
return self.codegen_python_sizevar(x)
|
| 612 |
+
|
| 613 |
+
def codegen_tuple_access(self, basename: str, index: str) -> str:
|
| 614 |
+
return f"{basename}[{index}]"
|
| 615 |
+
|
| 616 |
+
def codegen_python_shape_tuple(self, shape: Tuple[Expr, ...]) -> str:
|
| 617 |
+
parts = list(map(self.codegen_python_sizevar, shape))
|
| 618 |
+
if len(parts) == 0:
|
| 619 |
+
return "()"
|
| 620 |
+
if len(parts) == 1:
|
| 621 |
+
return f"({parts[0]}, )"
|
| 622 |
+
return f"({', '.join(parts)})"
|
| 623 |
+
|
| 624 |
+
def codegen_shape_tuple(self, shape: Tuple[Expr, ...]) -> str:
|
| 625 |
+
return self.codegen_python_shape_tuple(shape)
|
| 626 |
+
|
| 627 |
+
    def benchmark_compiled_module(self, output):
        """Append a `benchmark_compiled_module()` helper to the generated code
        that rebuilds fake inputs (rand_strided with recorded shapes/strides)
        and times `call` via print_performance."""

        def add_fake_input(name, shape, stride, device, dtype):
            # recreate a tensor with the same layout as the recorded input
            output.writeline(
                f"{name} = rand_strided("
                f"{self.codegen_python_shape_tuple(shape)}, "
                f"{self.codegen_python_shape_tuple(stride)}, "
                f"device='{device}', dtype={dtype})"
            )

        def add_expr_input(name, val):
            # symbolic-int inputs become concrete python ints via size_hint
            output.writeline(f"{name} = {val}")

        output.writelines(
            ["", "", "def benchmark_compiled_module(times=10, repeat=10):"]
        )
        with output.indent():
            output.splice(
                """
                from torch._dynamo.testing import rand_strided
                from torch._inductor.utils import print_performance
                """,
                strip=True,
            )

            for name, value in V.graph.constants.items():
                # all the constants are global variables, that's why we need
                # these 'global var_name' lines
                output.writeline(f"global {name}")
                add_fake_input(
                    name, value.size(), value.stride(), value.device, value.dtype
                )

            for name, value in V.graph.graph_inputs.items():
                if isinstance(value, sympy.Expr):  # Don't need to add symbolic
                    add_expr_input(name, V.graph.sizevars.size_hint(value))
                else:
                    shape = [V.graph.sizevars.size_hint(x) for x in value.get_size()]
                    stride = [V.graph.sizevars.size_hint(x) for x in value.get_stride()]
                    add_fake_input(
                        name, shape, stride, value.get_device(), value.get_dtype()
                    )

            call_str = f"call([{', '.join(V.graph.graph_inputs.keys())}])"
            output.writeline(
                f"return print_performance(lambda: {call_str}, times=times, repeat=repeat)"
            )
|
| 673 |
+
|
| 674 |
+
def add_benchmark_harness(self, output):
|
| 675 |
+
"""
|
| 676 |
+
Append a benchmark harness to generated code for debugging
|
| 677 |
+
"""
|
| 678 |
+
if not config.benchmark_harness:
|
| 679 |
+
return
|
| 680 |
+
|
| 681 |
+
self.benchmark_compiled_module(output)
|
| 682 |
+
|
| 683 |
+
output.writelines(["", "", 'if __name__ == "__main__":'])
|
| 684 |
+
with output.indent():
|
| 685 |
+
output.writelines(
|
| 686 |
+
[
|
| 687 |
+
"from torch._inductor.wrapper_benchmark import compiled_module_main",
|
| 688 |
+
f"compiled_module_main('{get_benchmark_name()}', benchmark_compiled_module)",
|
| 689 |
+
]
|
| 690 |
+
)
|
| 691 |
+
|
| 692 |
+
def define_kernel(
|
| 693 |
+
self, name: str, kernel: str, metadata: Optional[str] = None, cuda=True
|
| 694 |
+
):
|
| 695 |
+
metadata_comment = f"{metadata}\n" if metadata else ""
|
| 696 |
+
self.header.splice(f"\n\n{metadata_comment}{name} = {kernel}")
|
| 697 |
+
|
| 698 |
+
    def generate_numel_expr(self, kernel_name: str, tree):
        """Bind `<kernel>_<prefix>numel` to the tree's numel expression and
        return it wrapped as a SymbolicCallArg."""
        expr = f"{kernel_name}_{tree.prefix}numel"
        # NOTE(review): 'kenel' spelling matches the attribute used here;
        # presumably declared in __init__ (outside this view) — verify before
        # renaming, as both must agree.
        if expr not in self.kenel_numel_expr:
            # declare on first use; later occurrences are plain re-assignments
            self.kenel_numel_expr.add(expr)
            self.writeline(
                f"{self.declare}{expr} = {self.expr_printer(tree.numel)}{self.ending}"
            )
        else:
            self.writeline(f"{expr} = {self.expr_printer(tree.numel)}{self.ending}")
        # We can get symbolic expressions here, like s0*64
        # It is fine to have them here, but we need to handle them correctly as their own type
        # This is tricky to do, so we wrap in a custom type, distinct from scalars, but also from sympy*
        # scalars as well.
        # This is handled in `generate_args_decl` which has a correct comment of: TODO: only works for
        # constant now, need type info. I agree, this needs type info, and while this is not true type info
        # it suffices as a type hint for the purposes of producing the correct code for this type.
        return SymbolicCallArg(expr)
|
| 715 |
+
|
| 716 |
+
def wrap_kernel_call(self, name, call_args):
|
| 717 |
+
return f"{name}({', '.join(call_args)}){self.ending}"
|
| 718 |
+
|
| 719 |
+
def generate_profiler_mark_wrapper_call(self, stack):
|
| 720 |
+
self.wrapper_call.writeline("from torch.profiler import record_function")
|
| 721 |
+
self.wrapper_call.writeline(
|
| 722 |
+
f"with record_function('graph_{V.graph.graph_id}_inductor_wrapper_call'):"
|
| 723 |
+
)
|
| 724 |
+
stack.enter_context(self.wrapper_call.indent())
|
| 725 |
+
|
| 726 |
+
def generate_kernel_call(
|
| 727 |
+
self, name, call_args, grid=None, device_index=None, cuda=True
|
| 728 |
+
):
|
| 729 |
+
if cuda:
|
| 730 |
+
call_args_str = ", ".join(pexpr(item) for item in call_args)
|
| 731 |
+
grid_str = ", ".join(pexpr(item) for item in grid)
|
| 732 |
+
stream_name = self.write_get_cuda_stream(
|
| 733 |
+
V.graph.scheduler.current_device.index
|
| 734 |
+
)
|
| 735 |
+
self.writeline(
|
| 736 |
+
f"{name}.run({call_args_str}, grid=grid({grid_str}), stream={stream_name})"
|
| 737 |
+
)
|
| 738 |
+
else:
|
| 739 |
+
self.writeline(self.wrap_kernel_call(name, call_args))
|
| 740 |
+
|
| 741 |
+
def writeline(self, line):
|
| 742 |
+
self.lines.append(line)
|
| 743 |
+
|
| 744 |
+
def enter_context(self, ctx):
|
| 745 |
+
self.lines.append(LineContext(ctx))
|
| 746 |
+
|
| 747 |
+
    def val_to_arg_str(self, s):
        """Render a python value as source text for a kernel-call argument."""
        if isinstance(s, SymTypes):
            # SymInt/SymFloat/SymBool: round-trip through sympy to print
            return pexpr(sympy.expand(repr(s)))
        elif isinstance(s, sympy.Expr):
            return pexpr(s)
        elif isinstance(s, (tuple, list)):

            @dataclasses.dataclass
            class Shim:
                ref: Any

                def __repr__(self):
                    return self.ref

            # Shim's __repr__ keeps the recursively rendered element text
            # verbatim when the container itself is repr()'d
            return repr(type(s)(Shim(self.val_to_arg_str(a)) for a in s))
        elif isinstance(s, torch._ops.OpOverload):
            return _get_qualified_name(s)
        else:
            return repr(s)
|
| 766 |
+
|
| 767 |
+
# The following methods are for memory management
|
| 768 |
+
def make_buffer_allocation(self, buffer):
|
| 769 |
+
device = buffer.get_device()
|
| 770 |
+
dtype = buffer.get_dtype()
|
| 771 |
+
shape = tuple(buffer.get_size())
|
| 772 |
+
stride = tuple(buffer.get_stride())
|
| 773 |
+
return (
|
| 774 |
+
f"{buffer.get_name()} = empty_strided("
|
| 775 |
+
f"{self.codegen_shape_tuple(shape)}, "
|
| 776 |
+
f"{self.codegen_shape_tuple(stride)}, "
|
| 777 |
+
f"device='{device.type}', dtype={dtype})"
|
| 778 |
+
)
|
| 779 |
+
|
| 780 |
+
def make_buffer_free(self, buffer):
|
| 781 |
+
return f"del {buffer.get_name()}"
|
| 782 |
+
|
| 783 |
+
def make_buffer_reuse(self, old, new):
|
| 784 |
+
assert old.get_dtype() == new.get_dtype()
|
| 785 |
+
del_line = ""
|
| 786 |
+
if old.get_name() not in V.graph.get_output_names():
|
| 787 |
+
del_line = f"; {self.make_buffer_free(old)}"
|
| 788 |
+
if old.get_size() == new.get_size() and old.get_stride() == new.get_stride():
|
| 789 |
+
return f"{self.declare}{new.get_name()} = {old.get_name()}{del_line} {self.comment} reuse"
|
| 790 |
+
|
| 791 |
+
return (
|
| 792 |
+
f"{self.declare}{new.get_name()} = reinterpret_tensor("
|
| 793 |
+
f"{old.get_name()}, "
|
| 794 |
+
f"{self.codegen_shape_tuple(new.get_size())}, "
|
| 795 |
+
f"{self.codegen_shape_tuple(new.get_stride())}){del_line} {self.comment} reuse"
|
| 796 |
+
)
|
| 797 |
+
|
| 798 |
+
def codegen_deferred_allocation(self, name, layout):
|
| 799 |
+
self.writeline(
|
| 800 |
+
DeferredLine(
|
| 801 |
+
name,
|
| 802 |
+
f"{self.declare}{name} = {layout.view.codegen_reference()}{self.ending} {self.comment} alias",
|
| 803 |
+
)
|
| 804 |
+
)
|
| 805 |
+
|
| 806 |
+
    def use_preallocated_ouput(self, buffer):
        """Truthy when `buffer` is a graph output that AOT mode preallocates."""
        # NOTE(review): name keeps the historical 'ouput' misspelling; callers
        # in this file use this exact name, so do not rename in isolation.
        # outputs are passed-in in the AOT mode
        return (
            V.graph.aot_mode
            and buffer
            and buffer.get_name() in set(V.graph.get_output_names())
        )
|
| 813 |
+
|
| 814 |
+
    def codegen_allocation(self, buffer):
        """Queue allocation for `buffer`, dispatching on its layout kind."""
        name = buffer.get_name()

        if name in V.graph.removed_buffers or name in self.allocated:
            return
        self.allocated.add(name)
        if isinstance(
            buffer,
            (ir.ExternKernelAlloc, ir.MultiOutput),
        ):
            # these produce their own storage; nothing to allocate here
            return

        layout = buffer.get_layout()
        if isinstance(layout, ir.MutationLayout):
            # mutates an existing buffer in place; no new storage needed
            return
        if isinstance(layout, ir.AliasedLayout):
            assert isinstance(
                layout.view, ir.ReinterpretView
            ), f"unexpected {type(layout.view)}: {layout.view}"
            if not layout.maybe_guard_aligned():
                V.graph.unaligned_buffers.add(name)
            # allocate the underlying storage first, then alias into it
            self.codegen_allocation(layout.view.data)
            self.codegen_deferred_allocation(name, layout)
            return

        # plain buffer: allocate unless AOT mode preallocates it as an output
        self.writeline(
            AllocateLine(
                self,
                buffer,
                not self.use_preallocated_ouput(buffer),
            )
        )
|
| 846 |
+
|
| 847 |
+
    def codegen_free(self, buffer):
        """Queue freeing of `buffer`, deferring to reuse logic where possible."""
        name = buffer.get_name()

        # can be freed but not reused
        if isinstance(buffer, ir.InputBuffer):
            self.writeline(self.make_buffer_free(buffer))
            return

        if not self.can_reuse(buffer):
            return
        self.freed.add(name)

        layout = buffer.get_layout()
        if isinstance(layout, (ir.AliasedLayout, ir.MultiOutputLayout)):
            # aliases / multi-output holders can't be recycled; free outright
            self.writeline(self.make_buffer_free(buffer))
            return

        # otherwise the free is conditional on the buffer not being reused
        self.writeline(FreeIfNotReusedLine(self, buffer))
|
| 865 |
+
|
| 866 |
+
def can_reuse(self, input_buffer, output_buffer=None):
|
| 867 |
+
name = input_buffer.get_name()
|
| 868 |
+
if (
|
| 869 |
+
name in V.graph.removed_buffers
|
| 870 |
+
or name in V.graph.graph_inputs
|
| 871 |
+
or name in V.graph.constants
|
| 872 |
+
or name in self.freed
|
| 873 |
+
or self.use_preallocated_ouput(output_buffer)
|
| 874 |
+
):
|
| 875 |
+
return False
|
| 876 |
+
|
| 877 |
+
return True
|
| 878 |
+
|
| 879 |
+
def did_reuse(self, buffer, reused_buffer):
|
| 880 |
+
# Check whether a given buffer was reused by a possible reuser in the wrapper codegen
|
| 881 |
+
# Can be consulted from inside ir codegen, e.g. to determine whether a copy is needed
|
| 882 |
+
return (
|
| 883 |
+
buffer.get_name() in self.reuses
|
| 884 |
+
and self.reuses[buffer.get_name()] == reused_buffer.get_name()
|
| 885 |
+
)
|
| 886 |
+
|
| 887 |
+
def codegen_inplace_reuse(self, input_buffer, output_buffer):
|
| 888 |
+
assert buffer_reuse_key(input_buffer) == buffer_reuse_key(output_buffer)
|
| 889 |
+
self.codegen_allocation(input_buffer)
|
| 890 |
+
self.freed.add(input_buffer.get_name())
|
| 891 |
+
self.allocated.add(output_buffer.get_name())
|
| 892 |
+
self.reuses[output_buffer.get_name()] = input_buffer.get_name()
|
| 893 |
+
self.writeline(ReuseLine(self, input_buffer, output_buffer))
|
| 894 |
+
|
| 895 |
+
|
| 896 |
+
class CppWrapperCodeGen(WrapperCodeGen):
|
| 897 |
+
"""
|
| 898 |
+
Generates cpp wrapper for running on CPU and calls cpp kernels
|
| 899 |
+
"""
|
| 900 |
+
|
| 901 |
+
    def __init__(self):
        """Configure the shared codegen templates for C++ output."""
        super().__init__()
        from ..ir import OptionalTensor

        # C++ surface syntax consumed by the base-class templates
        self.declare = "auto "
        self.ending = ";"
        self.open_bracket = "{"
        self.closed_bracket = "}"
        self.comment = "//"
        self.namespace = "at::"
        self.none_str = "at::Tensor()"
        self.optional_tensor_str = repr(OptionalTensor())
        self.extern_call_ops = set()
        self.size = "sizes()"
        self.stride = "strides()"
        self.call_func_name = "inductor_entry_cpp"
        self.cuda = False
        self.supports_intermediate_hooks = False
        # outputs that must be copied back into caller-provided tensors (AOT);
        # consumed by generate_return below
        self.outputs_need_copy = set()
        # output name -> shape to resize_ the caller tensor to (AOT)
        self.resized_outputs = {}

        from .cpp import cexpr

        self.expr_printer = cexpr
|
| 925 |
+
|
| 926 |
+
def write_constant(self, name, hashed):
|
| 927 |
+
# include a hash so our code cache gives different constants different files
|
| 928 |
+
self.header.writeline(f"// {name} {hashed}")
|
| 929 |
+
|
| 930 |
+
    def write_header(self):
        """AOT mode inlines the C++ interface file; JIT mode emits the python
        loader preamble. Both then get the shared C++ includes."""
        if V.graph.aot_mode:
            with open(
                os.path.join(os.path.dirname(__file__), "aot_inductor_interface.cpp")
            ) as f:
                self.header.splice(f.read())
        else:
            # JIT path: the C++ source is embedded in a python triple-quoted
            # string and compiled/loaded at runtime via CppWrapperCodeCache
            self.header.splice(
                """
                import torch
                from torch._inductor.codecache import CppWrapperCodeCache

                cpp_wrapper_src = (
                '''
                """
            )

        self.header.splice(
            """
            #include <torch/csrc/inductor/inductor_ops.h>
            #define reinterpret_tensor torch::inductor::_reinterpret_tensor
            """
        )
|
| 953 |
+
|
| 954 |
+
def mark_output_type(self):
|
| 955 |
+
# mark output type to unwrap tensor back to python scalar
|
| 956 |
+
from ..ir import ShapeAsConstantBuffer
|
| 957 |
+
|
| 958 |
+
output_is_tensor = dict()
|
| 959 |
+
for idx, x in enumerate(V.graph.graph_outputs):
|
| 960 |
+
if isinstance(x, ShapeAsConstantBuffer):
|
| 961 |
+
output_is_tensor[idx] = False
|
| 962 |
+
else:
|
| 963 |
+
output_is_tensor[idx] = True
|
| 964 |
+
|
| 965 |
+
self.output_is_tensor = output_is_tensor
|
| 966 |
+
|
| 967 |
+
def write_prefix(self):
|
| 968 |
+
if V.graph.aot_mode:
|
| 969 |
+
self.prefix.writeline("namespace torch {")
|
| 970 |
+
self.prefix.writeline("namespace aot_inductor {")
|
| 971 |
+
|
| 972 |
+
    def write_wrapper_decl(self):
        """Emit the C++ entry-point signature and unpack `args` into named
        locals (scalars via .item<T>(), tensors by reference), then declare
        the reusable c10::optional temporaries."""
        inputs_len = len(V.graph.graph_inputs.keys())
        if V.graph.aot_mode:
            self.prefix.splice(
                """
                void AOTInductorModel::run_impl(
                    const std::vector<at::Tensor>& args,
                    std::vector<at::Tensor>& outputs,
                    cudaStream_t stream) {
                """
            )
        else:
            self.prefix.splice(
                f"""std::vector<at::Tensor> {self.call_func_name}(const std::vector<at::Tensor>& args) {{"""
            )
        with self.prefix.indent():
            if inputs_len != 0:
                for idx, input_key in enumerate(V.graph.graph_inputs.keys()):
                    # unwrap input tensor back to scalar
                    if isinstance(V.graph.graph_inputs[input_key], sympy.Expr):
                        from ..graph import may_get_constant_buffer_dtype
                        from .cpp import DTYPE_TO_CPP

                        dtype = may_get_constant_buffer_dtype(
                            V.graph.graph_inputs[input_key]
                        )
                        assert (
                            dtype is not None
                        ), "Fails to get the dtype of the sympy.Expr"
                        cpp_dtype = DTYPE_TO_CPP[dtype]
                        self.prefix.writeline(
                            f"{cpp_dtype} {input_key} = args[{idx}].item<{cpp_dtype}>();"
                        )
                    else:
                        self.prefix.writeline(f"at::Tensor {input_key} = args[{idx}];")

            # constants are appended after the real inputs in `args`
            assert all(
                isinstance(v, torch.Tensor) for v in list(V.graph.constants.values())
            ), "Expect all constants to be Tensor"
            for idx, constants_key in enumerate(V.graph.constants.keys()):
                constants_idx = inputs_len + idx
                self.prefix.writeline(
                    f"at::Tensor {constants_key} = args[{constants_idx}];"
                )

            self.codegen_inputs(self.prefix, V.graph.graph_inputs)

            # scratch optionals reused by extern-kernel call sites
            self.wrapper_call.splice(
                """
                c10::optional<at::Scalar> optional_scalar;
                c10::optional<c10::string_view> optional_string;
                c10::optional<at::Layout> optional_layout;
                c10::optional<at::Tensor> optional_tensor;
                torch::List<c10::optional<at::Scalar>> optional_list;
                """
            )
|
| 1028 |
+
|
| 1029 |
+
    def codegen_model_constructor(self):
        """
        // Generated code example
        AOTInductorModel::AOTInductorModel()
            : AOTInductorModelBase(4, 1) {
        inputs_info_[0].name = "linear.weight";
        inputs_info_[0].shape.reserve(2);
        inputs_info_[0].shape.emplace_back(10, 10, nullptr);
        inputs_info_[0].shape.emplace_back(64, 64, nullptr);
        ...
        outputs_info_[0].name = "output0";
        outputs_info_[0].shape.reserve(2);
        outputs_info_[0].shape.emplace_back(32, 32, nullptr);
        outputs_info_[0].shape.emplace_back(10, 10, nullptr);
        }
        """
        num_inputs = len(V.graph.graph_inputs)
        num_outputs = len(V.graph.graph_outputs)
        self.prefix.splice(
            f"""
            AOTInductorModel::AOTInductorModel()
                : AOTInductorModelBase({num_inputs}, {num_outputs}) {{
            """
        )

        with self.prefix.indent():
            # record name/dtype/shape metadata for every model input
            for idx, name in enumerate(V.graph.graph_inputs.keys()):
                # TODO: handle symbolic expressions later.
                assert not isinstance(V.graph.graph_inputs[name], sympy.Expr)
                self.prefix.writeline(f"""inputs_info_[{idx}].name = "{name}";""")
                self.prefix.writeline(
                    f"""inputs_info_[{idx}].dtype = "{V.graph.graph_inputs[name].get_dtype()}";"""
                )
                sizes = V.graph.graph_inputs[name].get_size()
                self.prefix.writeline(
                    f"inputs_info_[{idx}].shape.reserve({len(sizes)});"
                )
                for size in sizes:
                    # FIXME: set the lower bound and the upper bound to be "size".
                    # Later, we should specify the correct range for dynamic dimentions.
                    self.prefix.writeline(
                        f"inputs_info_[{idx}].shape.emplace_back({size}, {size}, nullptr);"
                    )

            # same metadata for every model output
            for idx, output in enumerate(V.graph.graph_outputs):
                # TODO: handle symbolic expressions later.
                assert not isinstance(output, sympy.Expr)
                self.prefix.writeline(f"""outputs_info_[{idx}].name = "output{idx}";""")
                self.prefix.writeline(
                    f"""outputs_info_[{idx}].dtype = "{output.get_dtype()}";"""
                )
                sizes = output.get_size()
                self.prefix.writeline(
                    f"outputs_info_[{idx}].shape.reserve({len(sizes)});"
                )
                for size in sizes:
                    # FIXME: set the lower bound and the upper bound to be "size".
                    # Later, we should specify the correct range for dynamic dimentions.
                    self.prefix.writeline(
                        f"outputs_info_[{idx}].shape.emplace_back({size}, {size}, nullptr);"
                    )

        self.prefix.writeline("}")
|
| 1092 |
+
|
| 1093 |
+
def generate(self):
|
| 1094 |
+
if V.graph.aot_mode:
|
| 1095 |
+
self.codegen_model_constructor()
|
| 1096 |
+
self.write_wrapper_decl()
|
| 1097 |
+
return super().generate()
|
| 1098 |
+
|
| 1099 |
+
def define_kernel(
|
| 1100 |
+
self, name: str, kernel: str, metadata: Optional[str] = None, cuda=False
|
| 1101 |
+
):
|
| 1102 |
+
self.header.splice(f"\n{kernel}\n")
|
| 1103 |
+
|
| 1104 |
+
    def generate_return(self, output_refs):
        """AOT mode copies/resizes into caller-provided `outputs`; JIT mode
        returns a std::vector initializer list."""
        # Output tensors are allocated by the AOT runtime.
        if V.graph.aot_mode:
            for idx, output in enumerate(V.graph.graph_outputs):
                if hasattr(output, "get_name"):
                    name = output.get_name()
                    if name in self.outputs_need_copy:
                        # result lives in a temporary; copy into the caller tensor
                        output_as_strided = output.codegen_reference()
                        self.wrapper_call.writeline(
                            f"outputs[{idx}].copy_({output_as_strided});"
                        )
                    resize_to = self.resized_outputs.get(name, None)
                    if resize_to is not None:
                        resize_to_args = ", ".join(
                            self.expr_printer(d) for d in resize_to
                        )
                        self.wrapper_call.writeline(
                            f"outputs[{idx}].resize_({{{resize_to_args}}});"
                        )
            # closes the run_impl body opened in write_wrapper_decl
            self.wrapper_call.writeline("\n}")
        else:
            self.wrapper_call.writeline(f"return {{{', '.join(output_refs)}}};\n}}")
|
| 1126 |
+
|
| 1127 |
+
def generate_end(self, result):
|
| 1128 |
+
if V.graph.aot_mode:
|
| 1129 |
+
result.writeline("} // namespace aot_inductor")
|
| 1130 |
+
result.writeline("} // namespace inductor")
|
| 1131 |
+
return
|
| 1132 |
+
|
| 1133 |
+
result.writeline("'''\n)")
|
| 1134 |
+
# get the hash of the wrapper code to name the extension
|
| 1135 |
+
wrapper_call_hash = codecache.code_hash(result.getvalue())
|
| 1136 |
+
result.splice(
|
| 1137 |
+
f"""
|
| 1138 |
+
module = CppWrapperCodeCache.load(cpp_wrapper_src, '{self.call_func_name}', '{wrapper_call_hash}', {self.cuda})
|
| 1139 |
+
"""
|
| 1140 |
+
)
|
| 1141 |
+
|
| 1142 |
+
# unwrap output tensor back to python scalar
|
| 1143 |
+
if all(x for x in self.output_is_tensor.values()):
|
| 1144 |
+
# If no ShapeAsConstantBuffer in the output, directly return the output as tensors
|
| 1145 |
+
return_str = "return f(args_tensor)"
|
| 1146 |
+
else:
|
| 1147 |
+
outputs = [
|
| 1148 |
+
f"outputs[{i}]" if self.output_is_tensor[i] else f"outputs[{i}].item()"
|
| 1149 |
+
for i in range(len(V.graph.graph_outputs))
|
| 1150 |
+
]
|
| 1151 |
+
outputs_str = f"[{', '.join(outputs)}]"
|
| 1152 |
+
return_str = f"""
|
| 1153 |
+
outputs = f(args_tensor)
|
| 1154 |
+
return {outputs_str}
|
| 1155 |
+
"""
|
| 1156 |
+
|
| 1157 |
+
args_str = "args_tensor = [arg if isinstance(arg, torch.Tensor) else torch.tensor(arg) for arg in args]"
|
| 1158 |
+
if V.graph.constants:
|
| 1159 |
+
# Append constants to the input args for cpp wrapper.
|
| 1160 |
+
# Python wrapper directly gets the value inside the wrapper call
|
| 1161 |
+
# as a global variable passed when calling exec(code, mod.__dict__, mod.__dict__).
|
| 1162 |
+
# For cpp wrapper, we need to pass this python value to the inductor_entry_cpp function explicitly.
|
| 1163 |
+
assert all(
|
| 1164 |
+
isinstance(v, torch.Tensor) for v in list(V.graph.constants.values())
|
| 1165 |
+
), "Expect all constants to be Tensor"
|
| 1166 |
+
constants_str = f"[{', '.join(V.graph.constants.keys())}]"
|
| 1167 |
+
args_str += f"""
|
| 1168 |
+
constants_tensor = {constants_str}
|
| 1169 |
+
args_tensor.extend(constants_tensor)
|
| 1170 |
+
"""
|
| 1171 |
+
|
| 1172 |
+
# Wrap the func to support setting result._boxed_call = True
|
| 1173 |
+
result.splice(
|
| 1174 |
+
f"""
|
| 1175 |
+
def _wrap_func(f):
|
| 1176 |
+
def g(args):
|
| 1177 |
+
{args_str}
|
| 1178 |
+
{return_str}
|
| 1179 |
+
return g
|
| 1180 |
+
call = _wrap_func(module.{self.call_func_name})
|
| 1181 |
+
"""
|
| 1182 |
+
)
|
| 1183 |
+
|
| 1184 |
+
def generate_extern_kernel_out(self, output_view, codegen_reference, args, kernel):
|
| 1185 |
+
if output_view:
|
| 1186 |
+
output_as_strided = f"{output_view.codegen_reference()}"
|
| 1187 |
+
output_name = f"{output_view.get_name()}_as_strided"
|
| 1188 |
+
self.writeline(f"auto {output_name} = {output_as_strided};")
|
| 1189 |
+
|
| 1190 |
+
args.insert(0, output_name)
|
| 1191 |
+
else:
|
| 1192 |
+
args.insert(0, f"{codegen_reference}")
|
| 1193 |
+
self.writeline(self.wrap_kernel_call(kernel, args))
|
| 1194 |
+
|
| 1195 |
+
def generate_scatter_fallback(
    self, output, inputs, kernel, fn, src_is_tensor, reduce, kwargs
):
    """Emit a single C++ statement calling a scatter fallback kernel.

    Builds ``kernel(output, inputs...[, reduce | kwargs...])`` and writes
    it with this writer's statement terminator (``self.ending``).
    """
    # TODO: support other overload for cpp wrapper and remove the below assertions
    line = f"{kernel}({output}, {','.join(map(str, inputs))}"
    if fn == "aten.scatter_":
        if src_is_tensor:
            # Tensor-src scatter_ may carry a reduce mode; render it as a
            # C++ argument string via the wrapper's value converter.
            if reduce:
                line += f", {V.graph.wrapper_code.val_to_arg_str(reduce)}"
        else:
            assert (
                reduce is None
            ), "Expect reduce to be None for aten.scatter_ with scalar src"
    else:
        # Non-scatter_ ops pass their remaining (pre-rendered) kwargs through.
        line += f", {','.join(kwargs)}"
    line += f"){self.ending}"
    self.writeline(line)
|
| 1212 |
+
|
| 1213 |
+
def add_benchmark_harness(self, output):
    """Append the standalone benchmark harness, except in AOT mode.

    AOT-compiled wrappers are driven by an external entry point, so no
    harness is generated for them.
    """
    if V.graph.aot_mode:
        return
    super().add_benchmark_harness(output)
|
| 1217 |
+
|
| 1218 |
+
def codegen_sizevar(self, x: Expr) -> str:
    """Render a (simplified) symbolic size expression as C++ source text."""
    return self.expr_printer(V.graph.sizevars.simplify(x))
|
| 1220 |
+
|
| 1221 |
+
def codegen_tuple_access(self, basename: str, index: str) -> str:
    """Return the C++ expression selecting element ``index`` of tuple ``basename``."""
    return "std::get<" + index + ">(" + basename + ")"
|
| 1223 |
+
|
| 1224 |
+
def codegen_shape_tuple(self, shape: Tuple[Expr, ...]) -> str:
    """Render ``shape`` as a C++ brace-enclosed initializer list.

    Empty shapes become ``{}``; single-element shapes keep a trailing
    comma (``{d, }``), matching the original emitter's output exactly.
    """
    parts = [self.codegen_sizevar(dim) for dim in shape]
    if not parts:
        return "{}"
    if len(parts) == 1:
        return f"{{{parts[0]}, }}"
    return "{" + ", ".join(parts) + "}"
|
| 1231 |
+
|
| 1232 |
+
def make_buffer_free(self, buffer):
    """Return the C++ statement that releases ``buffer``, or "" if none is needed.

    NOTE(review): buffers with a MultiOutputLayout get no free statement —
    presumably they do not own standalone storage; confirm against ir.py.
    """
    return (
        ""
        if isinstance(buffer.get_layout(), ir.MultiOutputLayout)
        else f"{buffer.get_name()}.reset();"
    )
|
| 1238 |
+
|
| 1239 |
+
def generate_profiler_mark_wrapper_call(self, stack):
    """Emit a RECORD_FUNCTION profiler annotation for the wrapper call.

    ``stack`` is accepted for interface parity with the base class but is
    unused in the C++ wrapper: RECORD_FUNCTION scopes itself via RAII.
    """
    self.wrapper_call.writeline(
        'RECORD_FUNCTION("inductor_wrapper_call", c10::ArrayRef<c10::IValue>());'
    )
|
| 1243 |
+
|
| 1244 |
+
def codegen_device(self, device):
    """Render a torch.device as C++ source text.

    With an explicit index this produces a ``c10::Device(type, index)``
    expression; without one, just the ATen device-type constant.
    """
    from .cpp import DEVICE_TO_ATEN

    aten_device = DEVICE_TO_ATEN[device.type]
    if device.index is None:
        return f"{aten_device}"
    return f"c10::Device({aten_device}, {device.index})"
|
| 1252 |
+
|
| 1253 |
+
def codegen_tensor_option(self, device, dtype):
    """Render an ``at::TensorOptions(...).dtype(...)`` expression.

    NOTE: the returned text deliberately ends with one extra ``)`` — it
    closes the ``empty_strided(`` opened by the caller
    (see make_buffer_allocation, which appends only ``;`` after this).
    """
    from .cpp import DTYPE_TO_ATEN

    cpp_device = self.codegen_device(device)
    return f"at::TensorOptions({cpp_device}).dtype({DTYPE_TO_ATEN[dtype]}))"
|
| 1258 |
+
|
| 1259 |
+
def make_buffer_allocation(self, buffer):
    """Return the C++ statement(s) that make ``buffer`` available.

    In AOT mode, graph outputs are preallocated by the caller; when the
    buffer is such an output and provably fits in the preallocated
    storage, it is aliased (and possibly resized) instead of allocated.
    Otherwise falls through to a fresh ``empty_strided`` allocation.
    """
    name = buffer.get_name()
    # outputs are passed-in in the AOT mode
    if self.use_preallocated_ouput(buffer):
        # Locate this buffer among the graph outputs by name.
        output_idx = None
        output_buffer = None
        for idx, output in enumerate(V.graph.graph_outputs):
            if hasattr(output, "get_name") and name == output.get_name():
                output_idx = idx
                output_buffer = output
                break

        assert (
            output_idx is not None and output_buffer is not None
        ), "Unknown output index"
        # Only reuse the preallocated output when the buffer is statically
        # known not to need more elements than the output provides.
        if V.graph.sizevars.statically_known_leq(
            buffer.get_numel(), output_buffer.get_numel()
        ):
            buf_str = f"auto {name} = outputs[{output_idx}];"
            # avoid resize_output warning:
            # "An output with one or more elements was resized since it had..."
            if buffer.get_size() != output_buffer.get_size():
                resize_to_args = ", ".join(
                    self.expr_printer(d) for d in buffer.get_size()
                )
                buf_str += f" {name}.resize_({{{resize_to_args}}});"
                # Remember the output's original size so it can be
                # restored later; each output may be resized at most once.
                assert name not in self.resized_outputs
                self.resized_outputs[name] = list(output_buffer.get_size())
            return buf_str
        else:
            # Too small to alias: allocate fresh below and copy back later.
            self.outputs_need_copy.add(name)

    # TODO: map layout here.
    device = buffer.get_device()
    dtype = buffer.get_dtype()
    shape = tuple(buffer.get_size())
    stride = tuple(buffer.get_stride())
    # codegen_tensor_option supplies the closing ')' of empty_strided(.
    return (
        f"{self.declare}{name} = {self.namespace}empty_strided("
        f"{self.codegen_shape_tuple(shape)}, "
        f"{self.codegen_shape_tuple(stride)}, "
        f"{self.codegen_tensor_option(device, dtype)};"
    )
|
| 1302 |
+
|
| 1303 |
+
def generate_extern_kernel_alloc_and_find_schema_if_needed(
    self,
    name,
    kernel,
    codegen_args,
    cpp_op_schema,
    cpp_kernel_key,
    cpp_kernel_overload_name="",
):
    """Emit a dispatcher-based call to an extern kernel.

    The first time a kernel key is seen, a ``static`` typed op handle is
    emitted (looked up once via findSchemaOrThrow); subsequent calls with
    the same key reuse that handle. The call result is bound to ``name``.
    """
    if cpp_kernel_key not in self.extern_call_ops:
        self.writeline(
            f"static auto op_{cpp_kernel_key} = c10::Dispatcher::singleton()"
        )
        self.writeline(
            f'\t.findSchemaOrThrow("{kernel}", "{cpp_kernel_overload_name}")'
        )
        self.writeline(f"\t.typed<{cpp_op_schema}>();")
        # Record the key so the static handle is only declared once.
        self.extern_call_ops.add(cpp_kernel_key)

    self.writeline(
        f"auto {name} = op_{cpp_kernel_key}.call({', '.join(codegen_args)});"
    )
|
| 1325 |
+
|
| 1326 |
+
def val_to_arg_str(self, val):
    """Convert a Python value to the C++ source text of a call argument.

    Handles None (rendered as an empty optional), booleans, strings,
    torch devices/dtypes, +/-infinity floats, and (nested) sequences;
    everything else falls back to ``repr``, which matches C++ literal
    syntax for ints and ordinary floats.
    """
    if val is None:
        # When None is passed as an argument, it represents an optional
        # that does not contain a value.
        return self.optional_tensor_str
    elif isinstance(val, bool):
        # Checked before the repr() fallback: repr(True) is not valid C++.
        return "true" if val else "false"
    elif isinstance(val, str):
        # Escape backslashes and embedded quotes so the emitted C++
        # string literal stays well-formed.
        escaped = val.replace("\\", "\\\\").replace('"', '\\"')
        return f'"{escaped}"'
    elif isinstance(val, torch.device):
        return self.codegen_device(val)
    elif isinstance(val, torch.dtype):
        # Imported lazily: only this branch needs the cpp backend table,
        # so the other conversions work even if that import is heavy.
        from .cpp import DTYPE_TO_ATEN

        return DTYPE_TO_ATEN[val]
    elif isinstance(val, float) and val in [float("inf"), float("-inf")]:
        # repr(inf) is "inf", which is not a valid C++ literal.
        if val == float("inf"):
            return "std::numeric_limits<float>::infinity()"
        else:
            return "-std::numeric_limits<float>::infinity()"
    elif isinstance(val, (list, tuple)):
        # Render sequences recursively as brace-initializer lists.
        return f"{{{', '.join(map(self.val_to_arg_str, val))}}}"
    else:
        return repr(val)
|
| 1349 |
+
|
| 1350 |
+
|
| 1351 |
+
class CudaWrapperCodeGen(CppWrapperCodeGen):
    """
    Generates cpp wrapper for running on GPU and calls CUDA kernels.

    Triton kernels are loaded from pre-compiled cubin files via the CUDA
    driver API (cuModuleLoad/cuLaunchKernel) using the helpers emitted in
    write_header; everything else is inherited from CppWrapperCodeGen.
    """

    def __init__(self):
        super().__init__()
        # Counter naming the per-callsite argument arrays.
        self.kernel_callsite_id = count()
        # Counter naming the per-argument local variables.
        self.arg_var_id = count()
        self.cuda = True

    def write_header(self):
        """Emit the base C++ header plus CUDA driver-API helpers."""
        super().write_header()
        self.header.splice(
            """
            #include <ATen/native/BinaryOps.h>
            #include <ATen/core/dispatch/Dispatcher.h>
            #include <c10/util/Exception.h>
            #include <c10/cuda/CUDAGuard.h>

            #define AT_CUDA_DRIVER_CHECK_OVERRIDE(EXPR)                    \\
            do {                                                           \\
                CUresult __err = EXPR;                                     \\
                if (__err != CUDA_SUCCESS) {                               \\
                    AT_ERROR("CUDA driver error: ", static_cast<int>(__err)); \\
                }                                                          \\
            } while (0)

            static inline CUfunction loadKernel(
                    const std::string &filePath,
                    const std::string &funcName,
                    int sharedMemBytes) {
                CUmodule mod;
                CUfunction func;
                AT_CUDA_DRIVER_CHECK_OVERRIDE(cuModuleLoad(&mod, filePath.c_str()));
                AT_CUDA_DRIVER_CHECK_OVERRIDE(cuModuleGetFunction(&func, mod, funcName.c_str()));
                if (sharedMemBytes > 0) {
                    AT_CUDA_DRIVER_CHECK_OVERRIDE(cuFuncSetAttribute(
                        func,
                        CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES,
                        sharedMemBytes
                    ));
                }
                return func;
            }

            static inline void launchKernel(
                    CUfunction func,
                    int gridX,
                    int gridY,
                    int gridZ,
                    int numWarps,
                    int sharedMemBytes,
                    void* args[],
                    cudaStream_t stream) {
                AT_CUDA_DRIVER_CHECK_OVERRIDE(cuLaunchKernel(
                    func, gridX, gridY, gridZ, 32*numWarps, 1, 1, sharedMemBytes, stream, args, nullptr));
            }
            """
        )

    def write_get_cuda_stream(self, index):
        """Emit a local cudaStream_t bound to the current stream of device ``index``."""
        name = f"stream{index}"
        self.writeline(
            f"cudaStream_t {name} = at::cuda::getCurrentCUDAStream({index});"
        )
        return name

    def define_kernel(
        self, name: str, kernel: str, metadata: Optional[str] = None, cuda=True
    ):
        """Define a kernel; CUDA kernels are intentionally a no-op here.

        CUDA (Triton) kernels are compiled separately and loaded from
        cubin files at call time, so only non-CUDA kernels are emitted
        via the base class.
        """
        if not cuda:
            return super().define_kernel(name, kernel, metadata, cuda)

    def generate(self):
        """Declare one static CUfunction handle per kernel, then generate normally."""
        self.prefix.writeline("\n")
        for kernel in self.src_to_kernel.values():
            self.prefix.writeline(f"static CUfunction {kernel} = nullptr;")
        self.prefix.writeline("\n")
        return super().generate()

    def generate_load_kernel(self, name, params):
        """Emit lazy, one-time loading of kernel ``name`` from its cubin file."""
        mangled_name = params.get("mangled_name", None)
        assert mangled_name is not None, "missing mangled_name"
        cubin_path = params.get("cubin_path", None)
        assert os.path.exists(
            cubin_path
        ), "cubin file should already exist at this moment"

        shared_mem = params.get("shared_mem", 0)
        # Guarded by the static handle: loadKernel runs at most once.
        self.writeline(f"if ({name} == nullptr) {{")
        self.writeline(
            f"""    {name} = loadKernel("{cubin_path}", "{mangled_name}", {shared_mem});"""
        )
        self.writeline("}")

    def generate_args_decl(self, call_args):
        """Declare a local variable per argument; return '&var, ...' for cuLaunchKernel.

        cuLaunchKernel takes an array of *pointers* to the argument
        values, hence the address-of on each declared local.
        """
        # TODO: only works for constant now, need type info
        new_args = []
        for arg in call_args:
            var_name = f"var_{next(self.arg_var_id)}"
            if isinstance(
                arg,
                (
                    sympy.Integer,
                    sympy.Symbol,
                    SymbolicCallArg,
                ),
            ):
                # Symbolic sizes: let the C++ compiler infer the type.
                self.writeline(f"auto {var_name} = {arg};")
            elif is_int(arg):
                self.writeline(f"int {var_name} = {arg};")
            elif is_float(arg):
                self.writeline(f"float {var_name} = {arg};")
            else:
                # Anything else is assumed to be a tensor expression.
                self.writeline(
                    f"CUdeviceptr {var_name} = reinterpret_cast<CUdeviceptr>({arg}.data_ptr());"
                )
            new_args.append(f"&{var_name}")

        return ", ".join(new_args)

    def generate_kernel_call(
        self, name, call_args, grid=None, device_index=None, cuda=True
    ):
        """Emit load + argument packing + launchKernel for one kernel callsite."""
        if not cuda:
            return super().generate_kernel_call(
                name, call_args, grid, device_index, cuda
            )

        # Launch parameters were recorded when the kernel was compiled.
        params = CudaKernelParamCache.get(name)
        assert (
            params is not None
        ), f"cuda kernel parameters for {name} should already exist at this moment"

        self.generate_load_kernel(name, params)

        call_args = self.generate_args_decl(call_args)
        kernel_args_var = f"kernel_args_var_{next(self.kernel_callsite_id)}"
        self.writeline(f"void* {kernel_args_var}[] = {{{call_args}}};")
        # NOTE(review): in AOT mode a variable named "stream" is assumed
        # to be in scope from the generated entry point — confirm.
        stream = (
            "stream" if V.graph.aot_mode else self.write_get_cuda_stream(device_index)
        )
        self.writeline(
            "launchKernel({}, {}, {}, {}, {}, {}, {}, {});".format(
                name,
                params["grid_x"],
                params["grid_y"],
                params["grid_z"],
                params["num_warps"],
                params["shared_mem"],
                kernel_args_var,
                stream,
            )
        )
|
llava_next/lib/python3.10/site-packages/torch/_inductor/coordinate_descent_tuner.py
ADDED
|
@@ -0,0 +1,307 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import copy
|
| 2 |
+
import itertools
|
| 3 |
+
import logging
|
| 4 |
+
from typing import Callable, Optional
|
| 5 |
+
|
| 6 |
+
from .utils import has_triton, red_text, triton_config_to_hashable
|
| 7 |
+
|
| 8 |
+
if has_triton():
|
| 9 |
+
import triton
|
| 10 |
+
else:
|
| 11 |
+
triton = None
|
| 12 |
+
|
| 13 |
+
from . import config as inductor_config
|
| 14 |
+
|
| 15 |
+
log = logging.getLogger(__name__)
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def get_field(config, name):
    """Read a tunable field from a triton Config.

    ``num_warps``/``num_stages`` live as attributes on the config object;
    every other field (XBLOCK, RBLOCK, ...) lives in ``config.kwargs``
    and reads as None when absent.
    """
    if name in ("num_warps", "num_stages"):
        return getattr(config, name)
    return config.kwargs.get(name, None)
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def set_field(config, name, value):
    """Write a tunable field on a triton Config.

    Mirrors get_field: ``num_warps``/``num_stages`` are attributes,
    everything else goes into ``config.kwargs``.
    """
    if name in ("num_warps", "num_stages"):
        setattr(config, name, value)
    else:
        config.kwargs[name] = value
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
class CoordescTuner:
    """
    The coordinate descent tuner. Tune one field/coordinate at a time.

    TODO: will it be necessary to tune multiple fields simultaneously?

    TODO: what if both increasing and decreasing a field can improve perf?
    i.e., there are multiple local optima.
    """

    def __init__(self, is_mm=False, name="unknown", size_hints=None):
        self.is_mm = is_mm  # we will tune num_stages for mm
        # Maps triton_config_to_hashable(config) -> timing so every
        # distinct config is benchmarked at most once per tuner.
        self.cached_benchmark_results = {}
        # Kernel name, used only in log messages.
        self.name = name
        # Optional per-dimension size hints capping the block sizes.
        self.size_hints = size_hints

    def get_xmax(self):
        """Largest XBLOCK to try: config max, capped by the x size hint."""
        xmax = inductor_config.triton.max_block["X"]
        if self.size_hints and len(self.size_hints) > 0:
            xmax = min(xmax, self.size_hints[0])
        return xmax

    def get_ymax(self):
        """Largest YBLOCK to try: config max, capped by the y size hint."""
        ymax = inductor_config.triton.max_block["Y"]
        if self.size_hints and len(self.size_hints) > 1:
            ymax = min(ymax, self.size_hints[1])
        return ymax

    def get_zmax(self):
        """Largest ZBLOCK to try: config max, capped by the z size hint."""
        zmax = inductor_config.triton.max_block["Z"]
        if self.size_hints and len(self.size_hints) > 2:
            zmax = min(zmax, self.size_hints[2])
        return zmax

    def get_rmax(self):
        """Largest RBLOCK to try, taken from the reduction size hint."""
        if self.size_hints and len(self.size_hints) > 0:
            return self.size_hints[-1]  # the last one is for reduction
        else:
            # large enough. We should not pick this large RBLOCK anyway
            return 2**30

    def cache_benchmark_result(self, config, timing):
        """Record the benchmarked timing for ``config``."""
        self.cached_benchmark_results[triton_config_to_hashable(config)] = timing

    def lookup_in_cache(self, config):
        """Return the cached timing for ``config``, or None if not benchmarked yet."""
        return self.cached_benchmark_results.get(triton_config_to_hashable(config))

    def call_func(self, func, config):
        """Benchmark ``config`` via ``func``, consulting/updating the cache."""
        found = self.lookup_in_cache(config)
        if found is not None:
            log.debug(" CACHED")
            return found
        timing = func(config)
        self.cache_benchmark_result(config, timing)
        return timing

    @property
    def tunable_fields(self):
        """Names of the fields this tuner may perturb for the current kernel."""
        out = [
            "XBLOCK",
            "YBLOCK",
            "ZBLOCK",
            # NOTE: we should not tune RBLOCK for persistent reduction.
            # We rely on the fact that persistent reduction's triton.Config
            # does not have the RBLOCK field to guarantee that.
            "RBLOCK",
            # the following 3 are for mm
            "BLOCK_M",
            "BLOCK_N",
            "BLOCK_K",
            "num_warps",
        ]
        if self.is_mm:
            out.append("num_stages")

        return out

    def value_too_large(self, name, val):
        """True if ``val`` exceeds the allowed maximum for block field ``name``."""
        if name == "XBLOCK":
            return val > self.get_xmax()
        if name == "YBLOCK":
            return val > self.get_ymax()
        if name == "ZBLOCK":
            return val > self.get_zmax()
        if name == "RBLOCK":
            return val > self.get_rmax()

        # Fields without a size cap (num_warps, BLOCK_M/N/K, num_stages).
        return False

    def get_neighbour_values(self, name, orig_val, radius=1, include_self=False):
        """
        Get neighbour values in 'radius' steps. The original value is not
        returned as its own neighbour unless include_self is True.
        """
        assert radius >= 1

        def update(cur_val, inc=True):
            # num_stages steps by +-1; block sizes and num_warps step by
            # doubling/halving (they are powers of two).
            if name == "num_stages":
                if inc:
                    return cur_val + 1
                else:
                    return cur_val - 1
            else:
                if inc:
                    return cur_val * 2
                else:
                    return cur_val // 2

        out = []
        # increment loop
        cur_val = orig_val
        for _ in range(radius):
            cur_val = update(cur_val, True)
            if self.value_too_large(name, cur_val):
                break
            out.append(cur_val)

        # decrement loop
        cur_val = orig_val
        for _ in range(radius):
            cur_val = update(cur_val, False)
            if cur_val <= 0:
                break
            out.append(cur_val)

        if include_self:
            out.append(orig_val)
        return out

    @staticmethod
    def has_improvement(baseline, test):
        """True if ``test`` beats ``baseline`` by more than the noise threshold."""
        threshold = 0.001  # 0.1%
        return test is not None and test < baseline * (1 - threshold)

    def check_all_tuning_directions(
        self,
        func: Callable[["triton.Config"], float],
        best_config,
        best_timing,
    ):
        """
        Check all directions. We only do this once the regular coordinate
        descent tuning finds no better choices any more.
        We only have a few tunable fields, so this should be fine.
        """
        candidate_values_list = []
        effective_fields = []
        for field in self.tunable_fields:
            old_value = get_field(best_config, field)
            if old_value is None:
                continue
            candidate_values = self.get_neighbour_values(
                field,
                old_value,
                radius=inductor_config.coordinate_descent_search_radius,
                include_self=True,
            )
            candidate_values_list.append(candidate_values)
            effective_fields.append(field)

        # Cartesian product: perturb every field at once.
        choices = itertools.product(*candidate_values_list)
        improved = False
        for choice in choices:
            assert len(choice) == len(effective_fields)
            candidate_config = copy.deepcopy(best_config)
            for new_val, field in zip(choice, effective_fields):
                set_field(candidate_config, field, new_val)
            cmp_res, candidate_timing = self.compare_config(
                func, candidate_config, best_config, best_timing
            )
            if cmp_res:
                improved = True
                best_config = candidate_config
                best_timing = candidate_timing

        return improved, best_config, best_timing

    def compare_config(self, func, candidate_config, best_config, best_timing):
        """
        Check if candidate_config is better than best_config.

        Return a tuple of (compare_result, candidate_timing).
        compare_result is true iff candidate_config is better.
        """
        log.debug("Try config %s", candidate_config)
        try:
            candidate_timing = self.call_func(func, candidate_config)
        except Exception as e:
            # Benchmarking a config may legitimately fail (e.g. resource
            # limits); treat it as infinitely slow rather than aborting.
            log.debug("Got exception %s", e)
            return False, float("inf")

        if self.has_improvement(best_timing, candidate_timing):
            log.debug(
                "Tune from %s %f -> %s %f",
                best_config,
                best_timing,
                candidate_config,
                candidate_timing,
            )

            return True, candidate_timing
        return False, candidate_timing

    def autotune(
        self,
        func: Callable[["triton.Config"], float],
        baseline_config: "triton.Config",
        baseline_timing: Optional[float] = None,
    ) -> "triton.Config":
        """Run coordinate descent from ``baseline_config``; return the best config found."""
        if baseline_timing is None:
            baseline_timing = self.call_func(func, baseline_config)

        log.debug("= Do coordinate descent tuning for %s =", self.name)
        log.debug(
            "Baseline Config %s, baseline timing %f", baseline_config, baseline_timing
        )
        improved = True
        best_config = baseline_config
        best_timing = baseline_timing
        tunable_fields = self.tunable_fields

        # Sweep one field at a time; repeat until a full sweep improves nothing.
        while improved:
            improved = False

            for name in tunable_fields:
                cur_val = get_field(best_config, name)
                # some kernel don't have RBLOCK/YBLOCK/ZBLOCK. So cur_val may be None
                if cur_val is None:
                    continue

                # It's possible that candidate_values is empty.
                # E.g., if XBLOCK is 1 initially and size_hint for x is also 1.
                # We would not try either larger or smaller XBLOCK in this case.
                candidate_values = self.get_neighbour_values(name, cur_val)

                for next_val in candidate_values:
                    candidate_config = copy.deepcopy(best_config)
                    set_field(candidate_config, name, next_val)

                    cmp_res, candidate_timing = self.compare_config(
                        func, candidate_config, best_config, best_timing
                    )
                    if cmp_res:
                        improved = True
                        best_config, best_timing = candidate_config, candidate_timing

            # Once single-coordinate moves stall, optionally try moving
            # all coordinates at once before giving up.
            if not improved and inductor_config.coordinate_descent_check_all_directions:
                old_best_timing = best_timing
                improved, best_config, best_timing = self.check_all_tuning_directions(
                    func, best_config, best_timing
                )

                if improved:
                    msg = red_text(
                        "Coordinate descend tuning found improvement of %.3fx by looking in all directions."
                    )
                    log.debug(
                        msg,
                        old_best_timing / best_timing,
                    )

        log.debug(
            "Improve from %s %f -> %s %f, %.3fx",
            baseline_config,
            baseline_timing,
            best_config,
            best_timing,
            baseline_timing / best_timing,
        )

        return best_config
|
llava_next/lib/python3.10/site-packages/torch/_inductor/cuda_properties.py
ADDED
|
@@ -0,0 +1,58 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import functools
|
| 2 |
+
from typing import Dict, Optional, Tuple, Union
|
| 3 |
+
|
| 4 |
+
import torch
|
| 5 |
+
from torch.cuda import _CudaDeviceProperties
|
| 6 |
+
|
| 7 |
+
# API to query cuda properties that will work in a triton compile process
|
| 8 |
+
# that cannot use the GPU APIs (due to processing fork() and initialization
|
| 9 |
+
# time issues). Properties are recorded in the main process before
|
| 10 |
+
# we fork the workers.
|
| 11 |
+
|
| 12 |
+
_compile_worker_current_device: Optional[int] = None
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
@functools.lru_cache(None)
|
| 16 |
+
def _properties() -> Dict[int, _CudaDeviceProperties]:
|
| 17 |
+
if not torch.cuda.is_available():
|
| 18 |
+
return {}
|
| 19 |
+
try:
|
| 20 |
+
return {
|
| 21 |
+
i: torch.cuda.get_device_properties(i)
|
| 22 |
+
for i in range(torch.cuda.device_count())
|
| 23 |
+
}
|
| 24 |
+
except RuntimeError:
|
| 25 |
+
return {}
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def set_compiler_worker_current_device(device: int) -> None:
    """Record, in a compile worker process, which CUDA device to treat as current.

    Workers cannot call torch.cuda.current_device() (no CUDA init after
    fork), so current_device() consults this module-level override first.
    """
    global _compile_worker_current_device
    _compile_worker_current_device = device
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def current_device() -> int:
    """Return the current CUDA device index.

    Prefers the worker override set by set_compiler_worker_current_device;
    falls back to querying torch.cuda directly (main process only).
    """
    if _compile_worker_current_device is not None:
        return _compile_worker_current_device
    return torch.cuda.current_device()
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def _device(device: Optional[Union[torch.device, int]]) -> int:
|
| 40 |
+
if device is not None:
|
| 41 |
+
if isinstance(device, torch.device):
|
| 42 |
+
assert device.type == "cuda"
|
| 43 |
+
device = device.index
|
| 44 |
+
return device
|
| 45 |
+
return current_device()
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
def get_device_properties(
    device: Optional[Union[torch.device, int]] = None
) -> _CudaDeviceProperties:
    """Return the recorded properties for ``device`` (default: current device).

    Raises KeyError if the device index was not captured by _properties()
    (e.g. CUDA unavailable in the main process).
    """
    return _properties()[_device(device)]
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
def get_device_capability(
    device: Optional[Union[torch.device, int]] = None
) -> Tuple[int, int]:
    """Return the (major, minor) compute capability of ``device``."""
    props = get_device_properties(device)
    return (props.major, props.minor)
|
llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/__init__.py
ADDED
|
File without changes
|
llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (181 Bytes). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/binary_folding.cpython-310.pyc
ADDED
|
Binary file (6.5 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/freezing_patterns.cpython-310.pyc
ADDED
|
Binary file (5.18 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/fuse_attention.cpython-310.pyc
ADDED
|
Binary file (10.3 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/group_batch_fusion.cpython-310.pyc
ADDED
|
Binary file (15.5 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/joint_graph.cpython-310.pyc
ADDED
|
Binary file (6.04 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/mkldnn_fusion.cpython-310.pyc
ADDED
|
Binary file (26.6 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/pad_mm.cpython-310.pyc
ADDED
|
Binary file (10.2 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/post_grad.cpython-310.pyc
ADDED
|
Binary file (12.8 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/pre_grad.cpython-310.pyc
ADDED
|
Binary file (12.4 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/quantization.cpython-310.pyc
ADDED
|
Binary file (19.2 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/replace_random.cpython-310.pyc
ADDED
|
Binary file (3.56 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/__pycache__/split_cat.cpython-310.pyc
ADDED
|
Binary file (24.1 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/binary_folding.py
ADDED
|
@@ -0,0 +1,274 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import functools
|
| 2 |
+
import itertools
|
| 3 |
+
|
| 4 |
+
import torch
|
| 5 |
+
from ..._dynamo.utils import counters
|
| 6 |
+
|
| 7 |
+
from ..pattern_matcher import Arg, CallFunction, KeywordArg
|
| 8 |
+
from .freezing_patterns import register_binary_folding_pattern
|
| 9 |
+
|
| 10 |
+
aten = torch.ops.aten
|
| 11 |
+
prims = torch.ops.prims
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def mark_mixed_dtype_conv(conv):
    """
    Mark a reduced-precision conv whose output is upcast to float32, flows
    through a chain of single-use binary ops, and is finally converted back to
    the conv's original dtype. Marked convs may be binary-folded in float32 and
    have their original precision recovered afterwards.
    """
    conv_dtype = conv.meta["val"].dtype
    # Only float16/bfloat16 convs need the mixed-dtype folding allowance.
    if conv_dtype not in (torch.float16, torch.bfloat16):
        return

    # The conv output must feed exactly one consumer.
    if not len(conv.users) == 1:
        return

    conv_user = next(iter(conv.users.keys()))
    if not isinstance(conv_user.meta["val"], torch.Tensor):
        return

    # That consumer must produce float32 (i.e. the upcast of the conv output).
    if not conv_user.meta["val"].dtype == torch.float32:
        return

    # Walk down a chain of single-use binary ops (add/sub/mul/div).
    while conv_user.target in _binary_ops:
        if not len(conv_user.users) == 1:
            return

        conv_user = next(iter(conv_user.users.keys()))

    # The chain must terminate with a convert back to the conv's original dtype.
    if not (
        conv_user.target == prims.convert_element_type.default
        and conv_user.args[1] == conv_dtype
    ):
        return

    # Record the original dtype so folding passes know it is safe to promote.
    conv.meta["_allow_conv_mixed_dtype_folding"] = conv_dtype
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
def mark_mixed_dtype_allowed_convs(gm):
    """
    Mark convolutions which we will binary fold even with mixed precision
    constants. We constant fold in the higher precision for better accuracy
    and then recover the original precision after.
    """
    conv_nodes = (
        node for node in gm.graph.nodes if node.target is aten.convolution.default
    )
    for conv_node in conv_nodes:
        mark_mixed_dtype_conv(conv_node)
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
def recover_original_precision_folded_convs(gm):
    """
    After binary folding conv weights and biases to a higher dtype, recover
    the original precision they were in.
    """
    graph = gm.graph
    conv_nodes = [n for n in graph.nodes if n.target is aten.convolution.default]
    for conv_node in conv_nodes:
        orig_dtype = conv_node.meta.get("_allow_conv_mixed_dtype_folding", None)
        if orig_dtype is None:
            continue

        with graph.inserting_before(conv_node):
            # args[1] is the weight, args[2] the (optional) bias.
            for arg_idx in (1, 2):
                folded_input = conv_node.args[arg_idx]
                if folded_input is None:
                    continue

                cast_node = graph.create_node(
                    "call_function",
                    prims.convert_element_type.default,
                    (folded_input, orig_dtype),
                )
                conv_node.replace_input_with(folded_input, cast_node)
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
# Elementwise binary ops that can be folded into a preceding convolution.
_binary_ops = [aten.add.Tensor, aten.sub.Tensor, aten.mul.Tensor, aten.div.Tensor]
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
@functools.lru_cache(None)
def binary_folding_init():
    """
    Register the conv + binary (add/sub/mul/div with a constant) folding
    patterns with the binary folding pass. lru_cache ensures registration
    happens only once.
    """
    _conv_args = [Arg() for _ in range(9)]
    _computation_ops = [aten.convolution.default]
    _computation_calls = [CallFunction(aten.convolution.default, *_conv_args, _users=1)]

    """
    In order to fuse add/sub/mul/div with conv, the dimensions of its
    constant tensor must satisfy the following:
    - with resizing, broadcast to w/ weight/bias tensor shape
    - broadcast to the conv output shape
    It needs to have a shape that can resize to weight/bias
    tensor shape because we need to run the op with the conv
    weights/bias without changing their sizes.
    It needs to broadcast to the conv output shape so that we do
    not accidentally change the shape of op output by pre-fusing it
    compared to eager.
    The only dimension value shared by weight/bias/conv output
    is they all contain a dim with value = channels-out. In the
    conv output tensor, this is in the second dimension,
    so the pointwise op tensor may have a second dimension of
    value == channels-out, but all the other dimensions have to be 1
    """

    def _op_not_broadcasting_with_conv(weight_tensor, other_tensor):
        # According to opDoesNotBroadCastWithConv of frozen_conv_folding.cpp
        weight_shape = weight_tensor.shape
        other_shape = other_tensor.shape
        if len(weight_shape) < len(other_shape):
            return False
        if len(weight_shape) == len(other_shape) + 1:
            # weight shape is [o, i, *], other_shape is [o, 1...].
            for i in reversed(range(len(other_shape))):
                if i == 0 and weight_shape[0] == other_shape[i]:
                    continue
                if other_shape[i] != 1:
                    return False
        else:
            # weight shape is [o, i, *], other_shape is [1, i, *]
            for i in reversed(range(len(other_shape))):
                if i == 1 and weight_shape[0] == other_shape[i]:
                    continue
                if other_shape[i] != 1:
                    return False
        return True

    def _check_conv_and_broadcast_op(conv_node, other):
        # According to checkConvAndBroadcastingOpPreConditions of frozen_conv_folding.cpp.
        # conv.weight must be a frozen parameter.
        if conv_node.args[1].op != "get_attr":
            return False
        # conv.bias (args[2], not args[1]) must be absent or a frozen parameter.
        if conv_node.args[2] is not None and conv_node.args[2].op != "get_attr":
            return False
        if (
            not isinstance(other, int)
            and not isinstance(other, float)
            and other.op != "get_attr"
        ):
            return False

        weight_meta_value = conv_node.args[1].meta.get("val")
        if weight_meta_value is None:
            return False
        # Avoid fusing op that causes type promotion
        # restricting to float avoids int/float difficulties with scalar overload
        if not weight_meta_value.is_floating_point():
            return False
        if isinstance(other, torch.fx.Node) and other.op == "get_attr":
            other_meta_value = other.meta.get("val")
            if not other_meta_value.is_floating_point():
                return False
            if (
                torch.promote_types(other_meta_value.dtype, weight_meta_value.dtype)
                != weight_meta_value.dtype
            ):
                # Folding would promote the weight dtype; only allow it for
                # convs explicitly marked by mark_mixed_dtype_conv.
                if not conv_node.meta.get("_allow_conv_mixed_dtype_folding", False):
                    return False

                if (
                    other_meta_value.dtype != torch.float
                    and weight_meta_value.dtype not in (torch.float16, torch.bfloat16)
                ):
                    return False

            if not _op_not_broadcasting_with_conv(weight_meta_value, other_meta_value):
                return False
        else:
            # TODO: support scalar case
            return False

        return True

    def _is_foldable_pattern(match):
        binary_node = match.output_node()
        computation_node = binary_node.args[0]
        other = binary_node.args[1]
        if binary_node.args[0].target not in _computation_ops:
            computation_node = binary_node.args[1]
            other = binary_node.args[0]
        # Check the resolved computation node: re-testing args[0] here would
        # wrongly reject matches where the conv is the second operand.
        if computation_node.target == aten.convolution.default:
            return _check_conv_and_broadcast_op(computation_node, other)

        return False

    def resize_scalar_or_tensor_to_shape(graph, other, shape):
        # TODO: support scalar case
        if other.meta.get("val").numel() == 1:
            # expand errors if the shape input has less # dims than the tensor input
            res = graph.create_node(
                "call_function",
                aten.reshape.default,
                (other, (1,)),
            )
            res = graph.create_node(
                "call_function",
                aten.expand.default,
                (res, shape),
            )
        else:
            res = graph.create_node(
                "call_function",
                aten.reshape.default,
                (other, shape),
            )
        return res

    def _create_new_conv_node(graph, conv_node, binary_node, other):
        # Build a replacement conv whose weight/bias have the binary op folded in.
        assert conv_node.target == aten.convolution.default
        conv_args = list(conv_node.args)
        weight_meta_value = conv_node.args[1].meta.get("val")
        bias = conv_args[2]
        if binary_node.target in [aten.add.Tensor, aten.sub.Tensor]:
            # add/sub folds into the bias only (per output channel).
            other_reshape = resize_scalar_or_tensor_to_shape(
                graph, other, (weight_meta_value.size(0),)
            )
            new_bias = graph.create_node(
                "call_function",
                binary_node.target,
                (0 if bias is None else bias, other_reshape),
            )
            conv_args[2] = new_bias
        else:
            assert binary_node.target in [aten.mul.Tensor, aten.div.Tensor]
            # mul/div scales the weight per output channel and, if present, the bias.
            weight_broadcast_shape = [1 for _ in range(len(weight_meta_value.shape))]
            weight_broadcast_shape[0] = weight_meta_value.size(0)
            other_reshape1 = resize_scalar_or_tensor_to_shape(
                graph, other, tuple(weight_broadcast_shape)
            )
            new_weight = graph.create_node(
                "call_function", binary_node.target, (conv_args[1], other_reshape1)
            )
            new_weight.meta.update(conv_args[1].meta)
            conv_args[1] = new_weight
            if bias is not None:
                other_reshape = resize_scalar_or_tensor_to_shape(
                    graph, other, (weight_meta_value.size(0),)
                )
                new_bias = graph.create_node(
                    "call_function", binary_node.target, (bias, other_reshape)
                )
                new_bias.meta.update(bias.meta)
                conv_args[2] = new_bias
        return graph.create_node("call_function", conv_node.target, tuple(conv_args))

    for _computation_call, binary_op in itertools.product(
        _computation_calls, _binary_ops
    ):

        @register_binary_folding_pattern(
            CallFunction(binary_op, _computation_call, KeywordArg("other")),
            extra_check=_is_foldable_pattern,
        )
        def folded_op(match, *args, **kwargs):
            counters["inductor"]["binary_folding"] += 1
            other = kwargs.get("other")
            binary_node = match.output_node()
            computation_node = (
                binary_node.args[0]
                if binary_node.args[0].target in _computation_ops
                else binary_node.args[1]
            )
            graph = match.graph
            with graph.inserting_before(binary_node):
                # TODO: support linear?
                assert computation_node.target == aten.convolution.default
                new_computation_node = _create_new_conv_node(
                    graph, computation_node, binary_node, other
                )
                binary_node.replace_all_uses_with(new_computation_node)
                new_computation_node.meta.update(computation_node.meta)
                graph.erase_node(binary_node)
                graph.erase_node(computation_node)
|
llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/freezing_patterns.py
ADDED
|
@@ -0,0 +1,192 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import functools
|
| 2 |
+
|
| 3 |
+
import torch
|
| 4 |
+
from torch._inductor.compile_fx import fake_tensor_prop
|
| 5 |
+
from ..._dynamo.utils import counters
|
| 6 |
+
|
| 7 |
+
from .. import config
|
| 8 |
+
from ..pattern_matcher import (
|
| 9 |
+
_return_true,
|
| 10 |
+
CallFunction,
|
| 11 |
+
Ignored,
|
| 12 |
+
inference_graph,
|
| 13 |
+
init_once_fakemode,
|
| 14 |
+
KeywordArg,
|
| 15 |
+
Match,
|
| 16 |
+
PatternMatcherPass,
|
| 17 |
+
register_graph_pattern,
|
| 18 |
+
register_replacement,
|
| 19 |
+
stable_topological_sort,
|
| 20 |
+
)
|
| 21 |
+
|
| 22 |
+
aten = torch.ops.aten
|
| 23 |
+
|
| 24 |
+
# First pass_patterns[0] are applied, then [1], then [2]
pass_patterns = [
    PatternMatcherPass(),
    PatternMatcherPass(),
    PatternMatcherPass(),
]

# Dedicated pass for conv + binary folding; applied repeatedly by freezing_passes.
binary_folding_pass = PatternMatcherPass()
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def freezing_passes(gm: torch.fx.GraphModule, aot_example_inputs):
    """
    Passes applied to the graph during freezing: iterated constant folding and
    conv+binary folding, the general freezing pattern passes, and (with mkldnn
    weight prepack enabled) de-duplication of packed weight nodes.
    """

    from ..freezing import constant_fold

    lazy_init()
    # We need a few rounds of binary folding to get rid of all the
    # unnecessary nodes, but may need a good method to choose the rounds number.
    # works like: conv+binary+binary.
    binary_folding = counters["inductor"]["binary_folding"]
    fake_tensor_prop(gm, aot_example_inputs, True)

    torch._inductor.fx_passes.binary_folding.mark_mixed_dtype_allowed_convs(gm)
    for _ in range(4):
        constant_fold(gm)
        # Make sure meta['val'] is properly set for all nodes
        fake_tensor_prop(gm, aot_example_inputs, True)
        binary_folding_pass.apply(gm.graph)
        # If we don't have binary folding, we don't need to run the pass again.
        # TODO: remove the need to run fake_tensor_prop on the whole model.
        if counters["inductor"]["binary_folding"] == binary_folding:
            break
        binary_folding = counters["inductor"]["binary_folding"]

    torch._inductor.fx_passes.binary_folding.recover_original_precision_folded_convs(gm)

    constant_fold(gm)
    fake_tensor_prop(gm, aot_example_inputs, True)

    for pattern in pass_patterns:
        pattern.apply(gm.graph)

    # The CPU weight packing always assumes the conv's weight is channels last,
    # so make sure the layout_optimization is on when doing it.
    if (
        torch._C._has_mkldnn
        and config.cpp.weight_prepack
        and config.layout_optimization
    ):
        from .mkldnn_fusion import _eliminate_duplicate_packed_nodes

        _eliminate_duplicate_packed_nodes(gm)

    stable_topological_sort(gm.graph)
    gm.recompile()
    gm.graph.lint()
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
@init_once_fakemode
def lazy_init():
    """One-time registration of freezing patterns (mkldnn prepack, addmm
    concat fusion, conv+binary folding)."""
    if torch._C._has_mkldnn and config.cpp.weight_prepack:
        from .mkldnn_fusion import _mkldnn_weight_pack_init

        _mkldnn_weight_pack_init()

    # Imported lazily to avoid an import cycle with binary_folding.
    from .binary_folding import binary_folding_init

    addmm_patterns_init()
    binary_folding_init()
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
def register_freezing_graph_pattern(pattern, extra_check=_return_true, pass_number=0):
    """Register *pattern* with the freezing pass group selected by *pass_number*."""
    target_pass = pass_patterns[pass_number]
    return register_graph_pattern(
        pattern,
        extra_check=extra_check,
        pass_dict=target_pass,
    )
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
def register_binary_folding_pattern(pattern, extra_check=_return_true):
    """Register *pattern* with the dedicated binary-folding pass."""
    return register_graph_pattern(
        pattern, extra_check=extra_check, pass_dict=binary_folding_pass
    )
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
@functools.lru_cache(None)
def addmm_patterns_init():
    """
    Register patterns that fuse three parallel matmul/addmm ops sharing one
    input into a single matmul/addmm over concatenated weights (and biases).
    lru_cache ensures registration happens only once.
    """
    if torch.cuda.is_available():
        # workaround https://github.com/pytorch/pytorch/issues/97894
        device = "cuda"
    else:
        device = "cpu"
    # Example-input factory used for tracing the pattern/replacement graphs.
    val = functools.partial(torch.empty, (10, 10), device=device, requires_grad=False)

    def check_concat_weights(match):
        # All three weights must be frozen parameters of identical shape so
        # they can be concatenated.
        weights = [
            match.kwargs["w1"],
            match.kwargs["w2"],
            match.kwargs["w3"],
        ]
        return all(
            w.op == "get_attr" and w.meta["val"].shape == weights[0].meta["val"].shape
            for w in weights
        )

    def matmul_fuse_pattern(inp, w1, w2, w3):
        return (inp @ w1, inp @ w2, inp @ w3)

    def matmul_replacement(inp, w1, w2, w3):
        # One matmul over the concatenated weights, then split the result back.
        cat_t = torch.cat((w1, w2, w3), dim=1)
        mm = inp @ cat_t
        return mm.chunk(3, dim=1)

    register_replacement(
        matmul_fuse_pattern,
        matmul_replacement,
        [val(), val(), val(), val()],
        inference_graph,
        pass_patterns[0],
        extra_check=check_concat_weights,
        exclusive_arg_names=("w1", "w2", "w3"),
    )

    def addmm_fuse_pattern_second(inp, w1, w2, w3, b1, b2, b3):
        return (
            aten.addmm(b1, inp, w1),
            aten.addmm(b2, inp, w2),
            aten.addmm(b3, inp, w3),
        )

    def addmm_fuse_replacement_second(inp, w1, w2, w3, b1, b2, b3):
        # Concatenate weights and biases, do one addmm, split the result back.
        cat_w = torch.cat((w1, w2, w3), dim=1)
        cat_b = torch.cat((b1, b2, b3))
        return aten.addmm(cat_b, inp, cat_w).chunk(3, dim=1)

    register_replacement(
        addmm_fuse_pattern_second,
        addmm_fuse_replacement_second,
        [val() for _ in range(7)],
        inference_graph,
        pass_patterns[0],
        extra_check=check_concat_weights,
        exclusive_arg_names=("w1", "w2", "w3", "b1", "b2", "b3"),
    )
|
| 172 |
+
|
| 173 |
+
|
| 174 |
+
def same_dtype(match):
    """True when the conversion's input already has the requested dtype."""
    converted_input = match.output_node().args[0]
    target_dtype = match.kwargs["dtype"]
    return converted_input.meta["val"].dtype == target_dtype
|
| 176 |
+
|
| 177 |
+
|
| 178 |
+
@register_graph_pattern(
    CallFunction(
        torch.ops.prims.convert_element_type.default,
        Ignored(),
        KeywordArg("dtype"),
    ),
    pass_dict=pass_patterns[0],
    extra_check=same_dtype,
)
def unnecessary_dtype_convert(match: Match, **kwargs):
    """Remove unnecessary dtype conversion op, probably left as a result of Conv-Bn folding"""
    graph = match.graph
    node = match.output_node()
    # The conversion is a no-op (same_dtype guarantees the input already has
    # the target dtype), so rewire consumers to the input and drop the node.
    node.replace_all_uses_with(node.args[0])
    graph.erase_node(node)
|
llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/fuse_attention.py
ADDED
|
@@ -0,0 +1,568 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import functools
|
| 2 |
+
import logging
|
| 3 |
+
import math
|
| 4 |
+
|
| 5 |
+
import torch
|
| 6 |
+
from ..._dynamo.utils import counters
|
| 7 |
+
from ..pattern_matcher import (
|
| 8 |
+
filter_nodes,
|
| 9 |
+
inference_graph,
|
| 10 |
+
register_replacement,
|
| 11 |
+
training_graph,
|
| 12 |
+
)
|
| 13 |
+
|
| 14 |
+
log = logging.getLogger(__name__)
|
| 15 |
+
aten = torch.ops.aten
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def _sfdp_pattern_1(query, key, value, inv_scale):
|
| 19 |
+
return (
|
| 20 |
+
torch.matmul(query, key.transpose(-2, -1))
|
| 21 |
+
.div(inv_scale)
|
| 22 |
+
.softmax(dim=-1)
|
| 23 |
+
.matmul(value)
|
| 24 |
+
)
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def _sfdp_replacement_1(query, key, value, inv_scale):
    """Fused replacement for _sfdp_pattern_1 (divisive inv_scale, no mask/dropout)."""
    counters["inductor"]["fuse_attention"] += 1
    return aten.scaled_dot_product_attention(
        query.contiguous(),
        key.contiguous(),
        value.contiguous(),
        attn_mask=None,
        dropout_p=0.0,
        is_causal=False,
        scale=1.0 / inv_scale,
    )
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def _sfdp_pattern_2(query, key, value, scale_factor):
|
| 41 |
+
return (
|
| 42 |
+
torch.matmul(query, key.transpose(-2, -1))
|
| 43 |
+
.mul(scale_factor)
|
| 44 |
+
.softmax(dim=-1)
|
| 45 |
+
.matmul(value)
|
| 46 |
+
)
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
def _sfdp_replacement_2(query, key, value, scale_factor):
    """Fused replacement for _sfdp_pattern_2 (multiplicative scale_factor)."""
    counters["inductor"]["fuse_attention"] += 1
    return aten.scaled_dot_product_attention(
        query.contiguous(),
        key.contiguous(),
        value.contiguous(),
        attn_mask=None,
        dropout_p=0.0,
        is_causal=False,
        scale=scale_factor,
    )
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
def _sfdp_pattern_3(query, key, value, inv_scale_factor, dropout_p):
|
| 63 |
+
return torch.nn.functional.dropout(
|
| 64 |
+
torch.matmul(query, key.transpose(-2, -1))
|
| 65 |
+
.div(inv_scale_factor)
|
| 66 |
+
.softmax(dim=-1),
|
| 67 |
+
p=dropout_p,
|
| 68 |
+
).matmul(value)
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
def _sfdp_replacement_3(query, key, value, inv_scale_factor, dropout_p):
    """Fused replacement for _sfdp_pattern_3 (divisive scale + dropout)."""
    counters["inductor"]["fuse_attention"] += 1
    return aten.scaled_dot_product_attention(
        query.contiguous(),
        key.contiguous(),
        value.contiguous(),
        attn_mask=None,
        dropout_p=dropout_p,
        is_causal=False,
        scale=1.0 / inv_scale_factor,
    )
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
def _sfdp_pattern_4(query, key, value, scale_factor, dropout_p):
|
| 85 |
+
return torch.nn.functional.dropout(
|
| 86 |
+
torch.matmul(query, key.transpose(-2, -1)).mul(scale_factor).softmax(dim=-1),
|
| 87 |
+
p=dropout_p,
|
| 88 |
+
).matmul(value)
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
def _sfdp_replacement_4(query, key, value, scale_factor, dropout_p):
    """Fused replacement for _sfdp_pattern_4 (multiplicative scale + dropout)."""
    counters["inductor"]["fuse_attention"] += 1
    return aten.scaled_dot_product_attention(
        query.contiguous(),
        key.contiguous(),
        value.contiguous(),
        attn_mask=None,
        dropout_p=dropout_p,
        is_causal=False,
        scale=scale_factor,
    )
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
def _sfdp_pattern_5(query, key, value, attn_mask):
|
| 105 |
+
attn_weight = torch.softmax(
|
| 106 |
+
(query @ key.transpose(-2, -1) / math.sqrt(query.size(-1))) + attn_mask, dim=-1
|
| 107 |
+
)
|
| 108 |
+
# attn_weight = torch.dropout(attn_weight, dropout_p)
|
| 109 |
+
return attn_weight @ value
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
def _sfdp_replacement_5(query, key, value, attn_mask):
    """Fused replacement for _sfdp_pattern_5. No explicit scale is passed;
    relies on SDPA's default scaling matching the pattern's 1/sqrt(dim)."""
    counters["inductor"]["fuse_attention"] += 1
    return aten.scaled_dot_product_attention(
        query.contiguous(),
        key.contiguous(),
        value.contiguous(),
        attn_mask=attn_mask.to(dtype=query.dtype),
        dropout_p=0.0,
        is_causal=False,
    )
|
| 122 |
+
|
| 123 |
+
|
| 124 |
+
def _sfdp_pattern_6(query, key, value, attn_mask, dropout_p):
    """Additively-masked attention with 1/sqrt(dim) scaling and dropout (train=True)."""
    attn_weight = torch.softmax(
        (query @ key.transpose(-2, -1) / math.sqrt(query.size(-1))) + attn_mask, dim=-1
    )
    attn_weight = torch.dropout(attn_weight, dropout_p, True)
    return attn_weight @ value
|
| 130 |
+
|
| 131 |
+
|
| 132 |
+
def _sfdp_replacement_6(query, key, value, attn_mask, dropout_p):
    """Fused replacement for _sfdp_pattern_6 (mask + dropout, default SDPA scale)."""
    counters["inductor"]["fuse_attention"] += 1
    return aten.scaled_dot_product_attention(
        query.contiguous(),
        key.contiguous(),
        value.contiguous(),
        attn_mask=attn_mask.to(dtype=query.dtype),
        dropout_p=dropout_p,
        is_causal=False,
    )
|
| 142 |
+
|
| 143 |
+
|
| 144 |
+
def _sfdp_pattern_7(query, key, value, dropout_p):
    """Permuted-input attention with a float32 softmax and cast back to float16."""
    # in real workloads inputs to matmul are permuted
    # causing matmul to expand to a series of expand and clone calls
    # we want the same to happen during pattern tracing
    q = query.permute(0, 2, 1, 3)
    k = key.permute(0, 2, 1, 3)
    v = value.permute(0, 2, 1, 3)
    div = q @ k.transpose(-2, -1) / math.sqrt(q.size(-1))
    div = div.to(torch.float32)
    attn_weight = torch.softmax(div, dim=-1)
    attn_weight = torch.dropout(attn_weight, dropout_p, True)
    attn_weight = attn_weight.to(torch.float16)
    return attn_weight @ v
|
| 157 |
+
|
| 158 |
+
|
| 159 |
+
def _sfdp_replacement_7(query, key, value, dropout_p):
    """Fused replacement for _sfdp_pattern_7. The pattern's fp32 softmax upcast
    is dropped here — presumably the fused kernel handles precision; confirm."""
    # sdpa prefers inputs in permuted format
    # it makes a copy to put them in this format
    # if they aren't already
    # to make replacement efficient ensure that inputs to sdpa
    # are in required order
    counters["inductor"]["fuse_attention"] += 1
    q = query.permute(0, 2, 1, 3)
    k = key.permute(0, 2, 1, 3)
    v = value.permute(0, 2, 1, 3)
    return aten.scaled_dot_product_attention(
        q,
        k,
        v,
        attn_mask=None,  # attn_mask,
        dropout_p=dropout_p,
        is_causal=False,
    )
|
| 177 |
+
|
| 178 |
+
|
| 179 |
+
def _sfdp_pattern_8(query, key, value):
    """No-dropout version of _sfdp_pattern_7 (permuted inputs, fp32 softmax)."""
    q = query.permute(0, 2, 1, 3)
    k = key.permute(0, 2, 1, 3)
    v = value.permute(0, 2, 1, 3)
    div = q @ k.transpose(-2, -1) / math.sqrt(q.size(-1))
    div = div.to(torch.float32)
    attn_weight = torch.softmax(div, dim=-1)
    attn_weight = attn_weight.to(torch.float16)
    return attn_weight @ v
|
| 189 |
+
|
| 190 |
+
|
| 191 |
+
def _sfdp_replacement_8(query, key, value):
    """Fused replacement for _sfdp_pattern_8 (permuted inputs, no dropout)."""
    counters["inductor"]["fuse_attention"] += 1
    q = query.permute(0, 2, 1, 3)
    k = key.permute(0, 2, 1, 3)
    v = value.permute(0, 2, 1, 3)
    return aten.scaled_dot_product_attention(
        q,
        k,
        v,
        attn_mask=None,  # attn_mask,
        dropout_p=0.0,
        is_causal=False,
    )
|
| 204 |
+
|
| 205 |
+
|
| 206 |
+
def _sfdp_pattern_9(query, key, value, dropout_p):
    """Like _sfdp_pattern_7 but the 1/sqrt(dim) scale is applied to q before
    the matmul instead of to the scores."""
    q = query.permute(0, 2, 1, 3)
    k = key.permute(0, 2, 1, 3)
    v = value.permute(0, 2, 1, 3)
    q = q / math.sqrt(q.size(-1))
    div = q @ k.transpose(-2, -1)
    div = div.to(torch.float32)
    attn_weight = torch.softmax(div, dim=-1)
    attn_weight = torch.dropout(attn_weight, dropout_p, True)
    attn_weight = attn_weight.to(torch.float16)
    return attn_weight @ v
|
| 217 |
+
|
| 218 |
+
|
| 219 |
+
def _sfdp_replacement_9(query, key, value, dropout_p):
    """Fused replacement for _sfdp_pattern_9 (q pre-scaled in the pattern;
    default SDPA scale used here)."""
    counters["inductor"]["fuse_attention"] += 1
    q = query.permute(0, 2, 1, 3)
    k = key.permute(0, 2, 1, 3)
    v = value.permute(0, 2, 1, 3)
    return aten.scaled_dot_product_attention(
        q,
        k,
        v,
        attn_mask=None,  # attn_mask,
        dropout_p=dropout_p,
        is_causal=False,
    )
|
| 232 |
+
|
| 233 |
+
|
| 234 |
+
def _sfdp_pattern_10(query, key, value):
    """No-dropout version of _sfdp_pattern_9."""
    q = query.permute(0, 2, 1, 3)
    k = key.permute(0, 2, 1, 3)
    v = value.permute(0, 2, 1, 3)
    q = q / math.sqrt(q.size(-1))
    div = q @ k.transpose(-2, -1)
    div = div.to(torch.float32)
    attn_weight = torch.softmax(div, dim=-1)
    attn_weight = attn_weight.to(torch.float16)
    return attn_weight @ v
|
| 245 |
+
|
| 246 |
+
|
| 247 |
+
def _sfdp_replacement_10(query, key, value):
    """Fused replacement for _sfdp_pattern_10 (permuted inputs, no dropout)."""
    counters["inductor"]["fuse_attention"] += 1
    q = query.permute(0, 2, 1, 3)
    k = key.permute(0, 2, 1, 3)
    v = value.permute(0, 2, 1, 3)
    return aten.scaled_dot_product_attention(
        q,
        k,
        v,
        attn_mask=None,  # attn_mask,
        dropout_p=0.0,
        is_causal=False,
    )
|
| 260 |
+
|
| 261 |
+
|
| 262 |
+
def _sfdp_pattern_11(query, key, value, inv_scale):
    # Mainly for huggingface models
    # Traced match template: scores are divided by inv_scale (an inverse
    # scale factor), no mask, no dropout.  Do not refactor the op sequence.
    q = query.permute(0, 2, 1, 3)
    k = key.permute(0, 2, 1, 3)
    v = value.permute(0, 2, 1, 3)
    return torch.matmul(q, k.transpose(-2, -1)).div(inv_scale).softmax(dim=-1).matmul(v)
|
| 268 |
+
|
| 269 |
+
|
| 270 |
+
def _sfdp_replacement_11(query, key, value, inv_scale):
    # Replacement for _sfdp_pattern_11.  transpose(1, 2) is equivalent to the
    # pattern's permute(0, 2, 1, 3); the pattern's div(inv_scale) becomes
    # scale=1/inv_scale (SDPA multiplies by `scale`).
    counters["inductor"]["fuse_attention"] += 1
    return aten.scaled_dot_product_attention(
        query.transpose(1, 2),
        key.transpose(1, 2),
        value.transpose(1, 2),
        attn_mask=None,
        dropout_p=0.0,
        is_causal=False,
        scale=1.0 / inv_scale,
    )
|
| 281 |
+
|
| 282 |
+
|
| 283 |
+
def _sfdp_pattern_12(query, key, value, inv_scale_factor, dropout_p):
    # Traced match template: like pattern 11 but with dropout applied to the
    # attention weights.  Do not refactor the op sequence.
    q = query.permute(0, 2, 1, 3)
    k = key.permute(0, 2, 1, 3)
    v = value.permute(0, 2, 1, 3)
    return torch.nn.functional.dropout(
        torch.matmul(q, k.transpose(-2, -1)).div(inv_scale_factor).softmax(dim=-1),
        p=dropout_p,
    ).matmul(v)
|
| 291 |
+
|
| 292 |
+
|
| 293 |
+
def _sfdp_replacement_12(query, key, value, inv_scale_factor, dropout_p):
    # Replacement for _sfdp_pattern_12: fused SDPA with dropout and the
    # pattern's div(inv_scale_factor) folded into scale=1/inv_scale_factor.
    counters["inductor"]["fuse_attention"] += 1
    return aten.scaled_dot_product_attention(
        query.transpose(1, 2),
        key.transpose(1, 2),
        value.transpose(1, 2),
        attn_mask=None,
        dropout_p=dropout_p,
        is_causal=False,
        scale=1.0 / inv_scale_factor,
    )
|
| 304 |
+
|
| 305 |
+
|
| 306 |
+
def _sfdp_pattern_13(query, key, value, inv_scale):
    # dropout would create a clone() if eval() or p = 0
    # Traced match template: q/k/v are already in SDPA layout (no permute);
    # the clone() matches the residue of a no-op dropout.  Do not refactor.
    return (
        torch.matmul(query, key.transpose(-2, -1))
        .div(inv_scale)
        .softmax(dim=-1)
        .clone()
        .matmul(value)
    )
|
| 315 |
+
|
| 316 |
+
|
| 317 |
+
def _sfdp_replacement_13(query, key, value, inv_scale):
    # Replacement for _sfdp_pattern_13.  contiguous() stands in for the
    # pattern's clone(); div(inv_scale) becomes scale=1/inv_scale.
    counters["inductor"]["fuse_attention"] += 1
    return aten.scaled_dot_product_attention(
        query.contiguous(),
        key.contiguous(),
        value.contiguous(),
        attn_mask=None,
        dropout_p=0.0,
        is_causal=False,
        scale=1.0 / inv_scale,
    )
|
| 328 |
+
|
| 329 |
+
|
| 330 |
+
def _sfdp_pattern_14(query, key, value, scale_factor):
    # dropout would create a clone() if eval() or p = 0
    # Traced match template: same as pattern 13 but scores are *multiplied*
    # by scale_factor instead of divided.  Do not refactor the op sequence.
    return (
        torch.matmul(query, key.transpose(-2, -1))
        .mul(scale_factor)
        .softmax(dim=-1)
        .clone()
        .matmul(value)
    )
|
| 339 |
+
|
| 340 |
+
|
| 341 |
+
def _sfdp_replacement_14(query, key, value, scale_factor):
    # Replacement for _sfdp_pattern_14: mul(scale_factor) maps directly onto
    # SDPA's scale kwarg (no inversion needed, unlike the div patterns).
    counters["inductor"]["fuse_attention"] += 1
    return aten.scaled_dot_product_attention(
        query.contiguous(),
        key.contiguous(),
        value.contiguous(),
        attn_mask=None,
        dropout_p=0.0,
        is_causal=False,
        scale=scale_factor,
    )
|
| 352 |
+
|
| 353 |
+
|
| 354 |
+
def _sfdp_pattern_15(query, key, value, inv_scale):
    # dropout would create a clone() if eval() or p = 0
    # Traced match template: pattern 13 preceded by the head/seq permute.
    # Do not refactor the op sequence.
    q = query.permute(0, 2, 1, 3)
    k = key.permute(0, 2, 1, 3)
    v = value.permute(0, 2, 1, 3)
    return (
        torch.matmul(q, k.transpose(-2, -1))
        .div(inv_scale)
        .softmax(dim=-1)
        .clone()
        .matmul(v)
    )
|
| 366 |
+
|
| 367 |
+
|
| 368 |
+
def _sfdp_replacement_15(query, key, value, inv_scale):
    # Replacement for _sfdp_pattern_15: transpose(1, 2) replaces the
    # pattern's permute(0, 2, 1, 3); div(inv_scale) becomes scale=1/inv_scale.
    counters["inductor"]["fuse_attention"] += 1
    return aten.scaled_dot_product_attention(
        query.transpose(1, 2),
        key.transpose(1, 2),
        value.transpose(1, 2),
        attn_mask=None,
        dropout_p=0.0,
        is_causal=False,
        scale=1.0 / inv_scale,
    )
|
| 379 |
+
|
| 380 |
+
|
| 381 |
+
def _sfdp_params_check(match):
    """Extra check run after an SDPA pattern match.

    Rejects the match when q/k/v do not share a dtype and device, or when the
    pattern contains an additive attention mask that is not a tensor of a
    compatible dtype on the same device as the query.
    """
    assert all(k in match.kwargs for k in ("query", "key", "value"))
    # meta["val"] holds the FakeTensor recorded during tracing.
    query = match.kwargs["query"].meta["val"]
    key = match.kwargs["key"].meta["val"]
    value = match.kwargs["value"].meta["val"]
    if not (query.dtype == key.dtype == value.dtype) or not (
        query.device == key.device == value.device
    ):
        return False
    add_mask_node = filter_nodes(match.nodes, aten.add.Tensor)
    # Has attn_mask add.
    if len(add_mask_node) > 0:
        # By convention in these patterns the mask is the second add operand.
        attn_mask_node = add_mask_node[0].args[1]
        # attn_mask_node may be a float/int number.
        if not hasattr(attn_mask_node, "meta"):
            return False
        attn_mask = attn_mask_node.meta["val"]
        # Make sure attn_mask.dtype == query.dtype or attn_mask.dtype == torch.bool
        if (
            not isinstance(attn_mask, torch.Tensor)
            or not (attn_mask.dtype == query.dtype or attn_mask.dtype == torch.bool)
            or query.device != attn_mask.device
        ):
            return False
    return True
|
| 406 |
+
|
| 407 |
+
|
| 408 |
+
def _sfdp_scale_factor_check(scale_factor_op):
    """Build an extra-check callable for patterns that scale the attention
    scores with *scale_factor_op* (aten.div.Tensor or aten.mul.Tensor).

    The returned check ensures the scale factor is a plain Python number
    (SDPA cannot take a tensor scale here) and then delegates to
    _sfdp_params_check.
    """

    def fn(match):
        scale_factor_node = filter_nodes(match.nodes, scale_factor_op)[0]
        # Note: args[1] of the scale_factor_node is always the scale_factor for the current patterns.
        scale_factor = scale_factor_node.args[1]
        # make sure the scale_factor a float/int. SymInt?
        if not isinstance(scale_factor, (float, int)):
            return False
        return _sfdp_params_check(match)

    return fn
|
| 419 |
+
|
| 420 |
+
|
| 421 |
+
@functools.lru_cache(None)
def _sfdp_init():
    """Trace and register every SDPA pattern/replacement pair exactly once.

    Each table row is (pattern, replacement, example args, scalar workaround,
    extra_check).  Every pair is registered twice: against the training
    (joint) graph and against the inference graph.
    """
    from .joint_graph import patterns

    if torch.cuda.is_available():
        # workaround https://github.com/pytorch/pytorch/issues/97894
        device = "cuda"
    else:
        device = "cpu"

    # sizes/values don't actually matter for initial trace
    # once we get a possible match we re-trace with the actual values and verify the match still holds
    # g: fp32 q/k/v factory; gp: fp16 factory (patterns 7-10); b: bias/mask
    # factory; c: scalar-tensor scale-factor factory.
    g = functools.partial(torch.empty, (2, 4, 8, 16), device=device, requires_grad=True)
    gp = functools.partial(
        torch.empty, (2, 8, 4, 16), device=device, requires_grad=True, dtype=torch.half
    )
    b = functools.partial(torch.empty, (1, 1, 8, 8), device=device)
    c = functools.partial(torch.tensor, 2.0, device=device)
    # workaround https://github.com/pytorch/pytorch/issues/97894
    # 0.113377 is a "magic" value that lets us recover the lost input arg relationship
    d = {"dropout_p": 0.113377}

    for pattern, replacement, args, workaround, extra_check in [
        (
            _sfdp_pattern_1,
            _sfdp_replacement_1,
            [g(), g(), g(), c()],
            {},
            _sfdp_scale_factor_check(aten.div.Tensor),
        ),
        (
            _sfdp_pattern_2,
            _sfdp_replacement_2,
            [g(), g(), g(), c()],
            {},
            _sfdp_scale_factor_check(aten.mul.Tensor),
        ),
        (
            _sfdp_pattern_3,
            _sfdp_replacement_3,
            [g(), g(), g(), c()],
            d,
            _sfdp_scale_factor_check(aten.div.Tensor),
        ),
        (
            _sfdp_pattern_4,
            _sfdp_replacement_4,
            [g(), g(), g(), c()],
            d,
            _sfdp_scale_factor_check(aten.mul.Tensor),
        ),
        (
            _sfdp_pattern_5,
            _sfdp_replacement_5,
            [g(), g(), g(), b()],
            {},
            _sfdp_params_check,
        ),
        (
            _sfdp_pattern_6,
            _sfdp_replacement_6,
            [g(), g(), g(), b()],
            d,
            _sfdp_params_check,
        ),
        (
            _sfdp_pattern_7,
            _sfdp_replacement_7,
            [gp(), gp(), gp()],
            d,
            _sfdp_params_check,
        ),
        (
            _sfdp_pattern_8,
            _sfdp_replacement_8,
            [gp(), gp(), gp()],
            {},
            _sfdp_params_check,
        ),
        (
            _sfdp_pattern_9,
            _sfdp_replacement_9,
            [gp(), gp(), gp()],
            d,
            _sfdp_params_check,
        ),
        (
            _sfdp_pattern_10,
            _sfdp_replacement_10,
            [gp(), gp(), gp()],
            {},
            _sfdp_params_check,
        ),
        (
            _sfdp_pattern_11,
            _sfdp_replacement_11,
            [g(), g(), g(), c()],
            {},
            _sfdp_scale_factor_check(aten.div.Tensor),
        ),
        (
            _sfdp_pattern_12,
            _sfdp_replacement_12,
            [g(), g(), g(), c()],
            d,
            _sfdp_scale_factor_check(aten.div.Tensor),
        ),
        (
            _sfdp_pattern_13,
            _sfdp_replacement_13,
            [g(), g(), g(), c()],
            {},
            _sfdp_scale_factor_check(aten.div.Tensor),
        ),
        (
            _sfdp_pattern_14,
            _sfdp_replacement_14,
            [g(), g(), g(), c()],
            {},
            _sfdp_scale_factor_check(aten.mul.Tensor),
        ),
        (
            _sfdp_pattern_15,
            _sfdp_replacement_15,
            [g(), g(), g(), c()],
            {},
            _sfdp_scale_factor_check(aten.div.Tensor),
        ),
    ]:
        # Append the workaround scalars (e.g. dropout_p) as trailing args so
        # the trace sees them as graph inputs.
        args = [*args, *workaround.values()]
        register_replacement(
            pattern,
            replacement,
            args,
            training_graph,
            patterns,
            extra_check=extra_check,
            scalar_workaround=workaround,
        )
        register_replacement(
            pattern,
            replacement,
            args,
            inference_graph,
            patterns,
            extra_check=extra_check,
            scalar_workaround=workaround,
        )
|
llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/group_batch_fusion.py
ADDED
|
@@ -0,0 +1,575 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import collections
|
| 2 |
+
import logging
|
| 3 |
+
import operator
|
| 4 |
+
|
| 5 |
+
import torch
|
| 6 |
+
from torch._dynamo.utils import counters
|
| 7 |
+
|
| 8 |
+
from .. import config
|
| 9 |
+
from ..pattern_matcher import (
|
| 10 |
+
CallFunctionVarArgs,
|
| 11 |
+
get_arg_value,
|
| 12 |
+
stable_topological_sort,
|
| 13 |
+
)
|
| 14 |
+
|
| 15 |
+
try:
    # importing this will register fbgemm lowerings for inductor
    import deeplearning.fbgemm.fbgemm_gpu.fb.inductor_lowerings  # noqa: F401

    has_fbgemm = True
except Exception:
    # fbgemm is optional (internal-only); group fusion is skipped without it.
    has_fbgemm = False
    pass

aten = torch.ops.aten

log = logging.getLogger(__name__)

# Greedy fusion-set bounds: never fuse fewer than MIN or more than MAX nodes.
MIN_FUSE_SET_SIZE = 5
MAX_FUSE_SET_SIZE = 300
MAX_FUSE_SEARCH_DEPTH = 5
# The maximum tensor size that can go into the fusion group
MAX_FUSE_TENSOR_SIZE_GROUP_LINEAR = 4096
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
class GroupBatchFusionBase:
    """Abstract interface shared by group- and batch-fusion passes.

    Subclasses implement ``match`` (classify a node into a fusion group) and
    ``fuse`` (rewrite a set of matched nodes into one fused op).
    """

    def match(self, node):
        """Return a hashable group key for *node*, or None if it cannot fuse."""
        raise NotImplementedError("match called on base")

    def fuse(self, graph, subset):
        """Rewrite *subset* of nodes in *graph* into a single fused operation."""
        raise NotImplementedError("fuse called on base")
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
class GroupFusion(GroupBatchFusionBase):
    """
    Fuse ops in a group way, e.g, fuse mm/addmm of arbitrary input shapes with fbgemm.gmm.
    """

    # Marker base class only; concrete passes subclass this.
    pass
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
class BatchFusion(GroupBatchFusionBase):
    """
    Fuse ops in a batch way, e.g, fuse mm/addmm of same input shapes with bmm.
    """

    # Marker base class only; concrete passes subclass this.
    pass
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
class GroupLinearFusion(GroupFusion):
    """Group independent mm/addmm nodes (arbitrary shapes) into one
    fbgemm.gmm call, then split the result back out with getitem."""

    def _addmm_node_can_be_fused(self, node):
        """addmm(bias, input, weight) is fusable only in the plain
        beta=alpha=1 form with 2-D operands whose dims are even and no larger
        than MAX_FUSE_TENSOR_SIZE_GROUP_LINEAR."""
        input_shape = node.args[1].meta["tensor_meta"].shape
        weight_shape = node.args[2].meta["tensor_meta"].shape
        return (
            node.kwargs.get("beta", 1.0) == 1.0
            and node.kwargs.get("alpha", 1.0) == 1.0
            and len(input_shape) == 2
            and len(weight_shape) == 2
            # Bug fix: this used to end in a bare generator expression
            # ("... for shape in ..."), which made the whole return value a
            # generator — always truthy — so the size constraints were never
            # enforced.  Wrap them in all(...) so they actually apply.
            and all(
                shape % 2 == 0 and shape <= MAX_FUSE_TENSOR_SIZE_GROUP_LINEAR
                for shape in input_shape + weight_shape
            )
        )

    def _mm_node_can_be_fused(self, node):
        """mm(input, weight) is fusable with the same 2-D/even/size limits."""
        input_shape = node.args[0].meta["tensor_meta"].shape
        weight_shape = node.args[1].meta["tensor_meta"].shape
        return (
            len(input_shape) == 2
            and len(weight_shape) == 2
            # Same bug fix as _addmm_node_can_be_fused: the constraints were a
            # bare generator expression and therefore never evaluated.
            and all(
                shape % 2 == 0 and shape <= MAX_FUSE_TENSOR_SIZE_GROUP_LINEAR
                for shape in input_shape + weight_shape
            )
        )

    def match(self, node):
        """Classify mm/addmm nodes into a single "group_linear" bucket."""
        if CallFunctionVarArgs(aten.mm.default).match(
            node
        ) and self._mm_node_can_be_fused(node):
            group_key = ("group_linear", True)
        elif CallFunctionVarArgs(aten.addmm.default).match(
            node
        ) and self._addmm_node_can_be_fused(node):
            bias = node.args[0]
            group_key = ("group_linear", bias is None)
        else:
            group_key = None
        return group_key

    def fuse(self, graph, subset):
        """Replace each mm/addmm in *subset* with getitem(i) of one gmm call."""
        group_inputs = []
        group_weights = []
        group_biases = []
        group_nodes = []
        for node in subset:
            if CallFunctionVarArgs(aten.addmm.default).match(node):
                bias, input, weight = node.args
            else:
                assert CallFunctionVarArgs(aten.mm.default).match(node)
                input, weight = node.args
                bias = None

            group_nodes.append(node)
            group_inputs.append(input)
            group_weights.append(weight)
            group_biases.append(bias)

        # gmm takes biases=None when no node in the group carries one.
        if all(bias is None for bias in group_biases):
            group_biases = None

        with graph.inserting_before(subset[0]):
            fused_mm = graph.call_function(
                torch.ops.fbgemm.gmm,
                args=(group_inputs, group_weights, group_biases),
            )

        for i, original_mm in enumerate(group_nodes):
            with graph.inserting_after(fused_mm):
                new_mm = graph.call_function(operator.getitem, args=(fused_mm, i))
            original_mm.replace_all_uses_with(new_mm)
            # Preserve the original node's metadata (shapes, stack traces).
            new_mm.meta.update(original_mm.meta)
            graph.erase_node(original_mm)
|
| 131 |
+
|
| 132 |
+
|
| 133 |
+
class BatchLinearLHSFusion(BatchFusion):
    """
    Batch linear left-hand side fusion. This pass tries to fuse the following patterns:

    torch.nn.functional.linear(x, w1), linear(x, w2),... * linear(x, wn)
    -> torch.mm(x, torch.cat([w1, w2,... * wn]).transpose(0, 1))

    We have a separate pass to eliminate contiguous transpose in a generic way.
    """

    def match(self, node):
        # Group linears that share the *same* input node and the same
        # bias-presence; the input node itself is part of the key.
        if CallFunctionVarArgs(torch.nn.functional.linear).match(
            node
        ) and is_linear_node_can_be_fused(node):
            input = get_arg_value(node, 0, "input")
            bias = get_arg_value(node, 2, "bias")
            group_key = ("batch_linear_lhs", bias is None, input)
        else:
            group_key = None
        return group_key

    def fuse(self, graph, subset):
        batch_nodes = []
        batch_input = None
        batch_weights = []
        batch_biases = []
        split_sections = []  # output-feature count of each linear, for split
        for node in subset:
            input = get_arg_value(node, 0, "input")
            weight = get_arg_value(node, 1, "weight")
            bias = get_arg_value(node, 2, "bias")
            batch_nodes.append(node)
            # All nodes in the subset were keyed on the same input node.
            if batch_input is None:
                batch_input = input
            else:
                assert batch_input is input
            batch_weights.append(weight)
            # NOTE(review): the group key guarantees bias-presence is uniform
            # across the subset, so this either collects every bias or none.
            if bias:
                batch_biases.append(bias)
            split_sections.append(weight.meta["example_value"].shape[0])

        with graph.inserting_before(subset[0]):
            # Concatenate weights along dim 0 and transpose so a single
            # mm/addmm computes all the linears at once.
            cat_weights = graph.call_function(torch.cat, args=((batch_weights, 0)))
            transposed_weights = graph.call_function(
                torch.transpose, args=(cat_weights, 0, 1)
            )
            if len(batch_biases) > 0:
                cat_biases = graph.call_function(torch.cat, args=((batch_biases, 0)))
                fused_lhs = graph.call_function(
                    torch.addmm,
                    args=(cat_biases, batch_input, transposed_weights),
                )
            else:
                fused_lhs = graph.call_function(
                    torch.mm,
                    args=(batch_input, transposed_weights),
                )
            # Split the fused output back into per-linear slices along dim 1.
            fused_lhs_list = graph.call_function(
                torch.split, args=((fused_lhs, split_sections, 1))
            )

        for i, node in enumerate(batch_nodes):
            with graph.inserting_after(fused_lhs_list):
                new_node = graph.call_function(
                    operator.getitem, args=(fused_lhs_list, i)
                )
            node.replace_all_uses_with(new_node)
            new_node.meta.update(node.meta)
            graph.erase_node(node)
|
| 202 |
+
|
| 203 |
+
|
| 204 |
+
def is_node_meta_valid(node):
    """Return True when *node* is absent (None) or carries an
    "example_value" entry in its meta dict."""
    return node is None or "example_value" in node.meta
|
| 210 |
+
|
| 211 |
+
|
| 212 |
+
def is_linear_node_can_be_fused(node):
    """Check whether a torch.nn.functional.linear node is eligible for
    batching: valid example_value metadata and 2-D input and weight."""
    inp = get_arg_value(node, 0, "input")
    wgt = get_arg_value(node, 1, "weight")
    if not is_node_meta_valid(node):
        return False
    input_is_2d = len(inp.meta["example_value"].shape) == 2
    weight_is_2d = len(wgt.meta["example_value"].shape) == 2
    return input_is_2d and weight_is_2d
|
| 220 |
+
|
| 221 |
+
|
| 222 |
+
class BatchLinearFusion(BatchFusion):
    """
    Batch linear fusion in pre grad pass.
    Fuse linear with same size with torch.baddmm
    """

    def _getitem_args(self, getitem_node: torch.fx.Node):
        # Return the container arg of a getitem call, or None for any other
        # node.  Used so linears reading from the same split share a key.
        if getitem_node.target != operator.__getitem__ or (
            getitem_node.op != "call_function"
        ):
            return None
        return getitem_node.args[0]

    def match(self, node):
        if CallFunctionVarArgs(torch.nn.functional.linear).match(
            node
        ) and is_linear_node_can_be_fused(node):
            input = get_arg_value(node, 0, "input")
            weight = get_arg_value(node, 1, "weight")
            bias = get_arg_value(node, 2, "bias")
            # Key on source container, input/weight shapes and bias presence
            # so only identically-shaped linears are batched together.
            group_key = (
                "batch_linear_pre_grad",
                self._getitem_args(input),
                str(input.meta["example_value"].shape),
                str(weight.meta["example_value"].shape),
                bias is None,
            )
        else:
            group_key = None
        return group_key

    def fuse(self, graph, subset):
        batch_nodes = []
        batch_inputs = []
        batch_weights = []
        batch_biases = []
        for node in subset:
            batch_nodes.append(node)
            batch_inputs.append(get_arg_value(node, 0, "input"))
            batch_weights.append(get_arg_value(node, 1, "weight"))
            batch_biases.append(get_arg_value(node, 2, "bias"))

        with graph.inserting_before(subset[0]):
            # Stack the N linears into one batched matmul:
            # inputs (N, B, in), weights stacked then transposed to (N, in, out).
            stack_inputs = graph.call_function(torch.stack, args=(batch_inputs, 0))
            stack_weights = graph.call_function(torch.stack, args=(batch_weights, 0))
            transpose_weight = graph.call_function(
                torch.transpose, args=(stack_weights, 1, 2)
            )
            if all(bias is None for bias in batch_biases):
                bmm = graph.call_function(
                    torch.bmm,
                    args=(stack_inputs, transpose_weight),
                )
            else:
                # baddbmm fuses the bias add; unsqueeze makes it broadcast
                # over the batch's row dimension.
                stack_biases = graph.call_function(torch.stack, args=(batch_biases, 0))
                unsqueeze_biases = graph.call_function(
                    torch.unsqueeze, args=(stack_biases, 1)
                )
                bmm = graph.call_function(
                    torch.baddbmm,
                    args=(unsqueeze_biases, stack_inputs, transpose_weight),
                )

            # Unbind along dim 0 to recover one result per original linear.
            bmm = graph.call_function(torch.unbind, args=(bmm,), kwargs={"dim": 0})
        for i, linear in enumerate(batch_nodes):
            with graph.inserting_after(bmm):
                getitem = graph.call_function(operator.getitem, args=(bmm, i))
            linear.replace_all_uses_with(getitem)
            getitem.meta.update(linear.meta)
            graph.erase_node(linear)
|
| 292 |
+
|
| 293 |
+
|
| 294 |
+
class BatchTanhFusion(BatchFusion):
    """
    Batch tanh fusion in pre grad pass.
    We only fuse the tanh if the input is after same split node.
    """

    def _getitem_args(self, getitem_node: torch.fx.Node):
        # Return the container arg of a getitem call, or None for any other
        # node — tanh nodes only fuse when fed from the same split/getitem.
        if getitem_node.target != operator.__getitem__ or (
            getitem_node.op != "call_function"
        ):
            return None
        return getitem_node.args[0]

    def match(self, node):
        input = get_arg_value(node, 0, "input")
        if (
            CallFunctionVarArgs(torch.tanh).match(node)
            and is_node_meta_valid(node)
            and self._getitem_args(input) is not None
        ):
            # Key on source container and input shape so stacked tanh inputs
            # are shape-compatible.
            group_key = (
                "batch_tanh",
                self._getitem_args(input),
                str(input.meta["example_value"].shape),
            )
        else:
            group_key = None
        return group_key

    def fuse(self, graph, subset):
        batch_nodes = []
        batch_inputs = []

        for node in subset:
            batch_nodes.append(node)
            batch_inputs.append(get_arg_value(node, 0, "input"))

        with graph.inserting_before(subset[0]):
            # One tanh over the stacked inputs, then unbind back to N outputs.
            stack_inputs = graph.call_function(torch.stack, args=(batch_inputs, 0))

            batch_tanh = graph.call_function(
                torch.tanh,
                args=(stack_inputs,),
            )
            unbind_tanh = graph.call_function(
                torch.unbind, args=(batch_tanh,), kwargs={"dim": 0}
            )
        for i, node in enumerate(batch_nodes):
            with graph.inserting_after(unbind_tanh):
                getitem = graph.call_function(
                    operator.getitem, args=(unbind_tanh, i)
                )
            node.replace_all_uses_with(getitem)
            getitem.meta.update(node.meta)
            graph.erase_node(node)
|
| 349 |
+
|
| 350 |
+
|
| 351 |
+
class BatchLayernormFusion(BatchFusion):
    """
    Batch layer norm fusion in pre grad pass
    """

    def match(self, node):
        if CallFunctionVarArgs(torch.nn.functional.layer_norm).match(node):
            input = get_arg_value(node, 0, "input")
            weight = get_arg_value(node, 2, "weight")
            bias = get_arg_value(node, 3, "bias")
            # Key on input/weight/bias shapes, normalized_shape and eps so
            # only identically-configured layer norms are batched; None when
            # any required example_value metadata is missing.
            group_key = (
                (
                    "batch_layernorm",
                    str(input.meta["example_value"].shape),
                    str(weight.meta["example_value"].shape)
                    if weight is not None
                    else "",
                    str(bias.meta["example_value"].shape) if bias is not None else "",
                    str(get_arg_value(node, 1, "normalized_shape")),
                    str(get_arg_value(node, 4, "eps")),
                )
                if "example_value" in input.meta
                and is_node_meta_valid(weight)
                and is_node_meta_valid(bias)
                else None
            )
        else:
            group_key = None
        return group_key

    def fuse(self, graph, subset):
        group_inputs = []
        group_shapes = []
        group_weights = []
        group_biases = []
        group_epss = []
        group_nodes = []
        for node in subset:
            group_nodes.append(node)
            group_inputs.append(get_arg_value(node, 0, "input"))
            group_shapes.append(get_arg_value(node, 1, "normalized_shape"))
            group_weights.append(get_arg_value(node, 2, "weight"))
            group_biases.append(get_arg_value(node, 3, "bias"))
            eps = get_arg_value(node, 4, "eps")
            if eps is None:
                # layer_norm's documented default eps.
                eps = 1e-5
            group_epss.append(eps)
        # Stack on a new dim just outside the normalized dims, so the single
        # layer_norm still normalizes over the original trailing dims.
        # NOTE(review): uses the last node's normalized_shape; the group key
        # stringifies normalized_shape so all members should agree — confirm.
        stack_dim = -1 - len(group_shapes[-1])

        if all(bias is None for bias in group_biases):
            group_biases = None
        if all(weight is None for weight in group_weights):
            group_weights = None
        assert all(
            eps == group_epss[0] for eps in group_epss
        ), "all epsilon values must be equal"

        with graph.inserting_before(subset[0]):
            stack_input = graph.call_function(
                torch.stack, args=(group_inputs, stack_dim)
            )
            if group_weights is not None:
                stack_weight = graph.call_function(torch.stack, args=(group_weights,))
            else:
                stack_weight = None
            if group_biases is not None:
                stack_bias = graph.call_function(torch.stack, args=(group_biases,))
            else:
                stack_bias = None

            # Normalize without affine params, then apply the stacked
            # weight/bias manually below (per-member affine differs).
            batch_layer_norm = graph.call_function(
                torch.nn.functional.layer_norm,
                args=(stack_input, group_shapes[-1]),
                kwargs={"eps": group_epss[-1]},
            )

            if group_weights is not None and group_biases is not None:
                # bias + weight * normalized, in one fused op.
                batch_layer_norm = graph.call_function(
                    torch.addcmul, args=(stack_bias, stack_weight, batch_layer_norm)
                )
            elif group_weights is not None and group_biases is None:
                batch_layer_norm = graph.call_function(
                    torch.mul, args=(stack_weight, batch_layer_norm)
                )
            elif group_weights is None and group_biases is not None:
                batch_layer_norm = graph.call_function(
                    torch.add, args=(stack_bias, batch_layer_norm)
                )

            batch_layer_norm_unbind = graph.call_function(
                torch.unbind,
                args=(batch_layer_norm,),
                kwargs={"dim": stack_dim},
            )

        for i, node in enumerate(group_nodes):
            with graph.inserting_after(batch_layer_norm_unbind):
                new_node = graph.call_function(
                    operator.getitem, args=(batch_layer_norm_unbind, i)
                )
            node.replace_all_uses_with(new_node)
            new_node.meta.update(node.meta)
            graph.erase_node(node)
|
| 454 |
+
|
| 455 |
+
|
| 456 |
+
def find_independent_subset_greedy(node_list):
    """
    Return a list of subset from node_list, all nodes in each subset are independent with each other and can be fused together.
    The type of subset is list, so we can preserve node's order and benefit from split-cat elimination in later pass.
    """
    # Generator: yields one subset per round of the greedy scan; each yielded
    # subset has at least MIN_FUSE_SET_SIZE and at most MAX_FUSE_SET_SIZE nodes.
    visited_node_set = set()
    dep_set = set()

    def find_dependent_nodes(src_node, cur_node):
        # DFS over inputs, recording which members of node_list cur_node
        # (transitively) depends on.  Results accumulate into dep_set.
        for input_node in cur_node.all_input_nodes:
            if input_node in node_list:
                dep_set.add(input_node)

            if input_node not in visited_node_set:
                visited_node_set.add(input_node)
                find_dependent_nodes(src_node, input_node)

    while len(node_list) > 0:
        subset = []
        subset_deps = set()

        for node in node_list:
            if len(subset) >= MAX_FUSE_SET_SIZE:
                break

            # Reset scratch sets for this candidate's dependency scan.
            visited_node_set.clear()
            dep_set.clear()

            find_dependent_nodes(node, node)
            # Accept the node only if it neither depends on a node already in
            # the subset nor is depended on by one (independence both ways).
            if not dep_set.intersection(subset) and node not in subset_deps:
                subset.append(node)
                subset_deps.update(dep_set)

        # Subsets smaller than the minimum aren't worth a fused kernel.
        if len(subset) >= MIN_FUSE_SET_SIZE:
            yield subset

        # Next round scans only the nodes not consumed this round; note that
        # an undersized (un-yielded) subset's nodes are still removed here.
        next_round_node_list = [node for node in node_list if node not in subset]
        node_list = next_round_node_list
|
| 494 |
+
|
| 495 |
+
|
| 496 |
+
def get_fusion_candidates(rule, root_node, fused_set):
|
| 497 |
+
"""
|
| 498 |
+
Search fusion candidates for a specific rule using BFS starting from the root node.
|
| 499 |
+
We only search the subgraph within MAX_FUSE_SEARCH_DEPTH.
|
| 500 |
+
"""
|
| 501 |
+
q = collections.deque()
|
| 502 |
+
|
| 503 |
+
candidate_dict = collections.defaultdict(list)
|
| 504 |
+
visited_set = set()
|
| 505 |
+
|
| 506 |
+
for next_node in root_node.all_input_nodes:
|
| 507 |
+
q.append((1, next_node))
|
| 508 |
+
visited_set.add(next_node)
|
| 509 |
+
|
| 510 |
+
while len(q) > 0:
|
| 511 |
+
depth, node = q.popleft()
|
| 512 |
+
|
| 513 |
+
if node in fused_set:
|
| 514 |
+
continue
|
| 515 |
+
|
| 516 |
+
key = rule.match(node)
|
| 517 |
+
if key is not None:
|
| 518 |
+
candidate_nodes = candidate_dict[key]
|
| 519 |
+
if node not in candidate_nodes:
|
| 520 |
+
candidate_nodes.append(node)
|
| 521 |
+
else:
|
| 522 |
+
if depth < MAX_FUSE_SEARCH_DEPTH:
|
| 523 |
+
for next_node in node.all_input_nodes:
|
| 524 |
+
if next_node not in visited_set:
|
| 525 |
+
visited_set.add(next_node)
|
| 526 |
+
q.append((depth + 1, next_node))
|
| 527 |
+
|
| 528 |
+
return candidate_dict
|
| 529 |
+
|
| 530 |
+
|
| 531 |
+
def apply_group_batch_fusion(graph, rule):
|
| 532 |
+
stable_topological_sort(graph)
|
| 533 |
+
fused_set = set()
|
| 534 |
+
|
| 535 |
+
for node in reversed(graph.nodes):
|
| 536 |
+
candidates = get_fusion_candidates(rule, node, fused_set)
|
| 537 |
+
|
| 538 |
+
for key, candidate_nodes in candidates.items():
|
| 539 |
+
if len(candidate_nodes) < MIN_FUSE_SET_SIZE:
|
| 540 |
+
continue
|
| 541 |
+
|
| 542 |
+
for subset in find_independent_subset_greedy(candidate_nodes):
|
| 543 |
+
rule.fuse(graph, subset)
|
| 544 |
+
fused_set.update(subset)
|
| 545 |
+
if isinstance(rule, GroupFusion):
|
| 546 |
+
counters["inductor"]["group_fusion"] += 1
|
| 547 |
+
else:
|
| 548 |
+
counters["inductor"]["batch_fusion"] += 1
|
| 549 |
+
|
| 550 |
+
log.info(
|
| 551 |
+
f"{rule.__class__.__name__}: key = {key}; subset size = {len(subset)}" # noqa: G004
|
| 552 |
+
)
|
| 553 |
+
|
| 554 |
+
|
| 555 |
+
def group_batch_fusion_post_grad_passes(graph: torch.fx.Graph):
|
| 556 |
+
fusions = []
|
| 557 |
+
|
| 558 |
+
if config.group_fusion and has_fbgemm:
|
| 559 |
+
fusions += [GroupLinearFusion()]
|
| 560 |
+
|
| 561 |
+
for rule in fusions:
|
| 562 |
+
apply_group_batch_fusion(graph, rule)
|
| 563 |
+
|
| 564 |
+
|
| 565 |
+
def group_batch_fusion_pre_grad_passes(graph: torch.fx.Graph):
|
| 566 |
+
fusions = []
|
| 567 |
+
if config.batch_fusion:
|
| 568 |
+
fusions += [
|
| 569 |
+
BatchLinearFusion(),
|
| 570 |
+
BatchLinearLHSFusion(),
|
| 571 |
+
BatchLayernormFusion(),
|
| 572 |
+
BatchTanhFusion(),
|
| 573 |
+
]
|
| 574 |
+
for rule in fusions:
|
| 575 |
+
apply_group_batch_fusion(graph, rule)
|
llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/joint_graph.py
ADDED
|
@@ -0,0 +1,256 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
from collections import Counter
|
| 3 |
+
from typing import Set
|
| 4 |
+
|
| 5 |
+
import torch
|
| 6 |
+
import torch._guards
|
| 7 |
+
from .. import config
|
| 8 |
+
from ..pattern_matcher import (
|
| 9 |
+
CallFunction,
|
| 10 |
+
init_once_fakemode,
|
| 11 |
+
KeywordArg,
|
| 12 |
+
Match,
|
| 13 |
+
PatternMatcherPass,
|
| 14 |
+
register_graph_pattern,
|
| 15 |
+
stable_topological_sort,
|
| 16 |
+
)
|
| 17 |
+
from .replace_random import replace_random_passes
|
| 18 |
+
|
| 19 |
+
log = logging.getLogger(__name__)
|
| 20 |
+
patterns = PatternMatcherPass()
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
@init_once_fakemode
|
| 24 |
+
def lazy_init():
|
| 25 |
+
from .fuse_attention import _sfdp_init
|
| 26 |
+
from .pad_mm import _pad_mm_init
|
| 27 |
+
|
| 28 |
+
_pad_mm_init()
|
| 29 |
+
_sfdp_init()
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
@torch.utils._python_dispatch._disable_current_modes()
|
| 33 |
+
def remove_no_ops(
|
| 34 |
+
gm: torch.fx.GraphModule, zeros: Set[torch.fx.Node], ones: Set[torch.fx.Node]
|
| 35 |
+
):
|
| 36 |
+
"Removes no-ops: (+ 0, - 0, * 1, / 1)"
|
| 37 |
+
aten = torch.ops.aten
|
| 38 |
+
graph = gm.graph
|
| 39 |
+
|
| 40 |
+
def fake_tensors_eq(t1, t2, fields=("shape", "dtype", "device")):
|
| 41 |
+
for field in fields:
|
| 42 |
+
if getattr(t1, field) != getattr(t2, field):
|
| 43 |
+
return False
|
| 44 |
+
return True
|
| 45 |
+
|
| 46 |
+
def replace_no_op(node, replace_input_index):
|
| 47 |
+
replacement = node.args[replace_input_index]
|
| 48 |
+
|
| 49 |
+
# https://github.com/pytorch/pytorch/issues/86128 causes
|
| 50 |
+
# non-Tensor inputs even for ops with only Tensor inputs.
|
| 51 |
+
# TODO - decompose/type promote to avoid this
|
| 52 |
+
if not all(isinstance(arg, torch.fx.Node) for arg in node.args):
|
| 53 |
+
return
|
| 54 |
+
|
| 55 |
+
if not fake_tensors_eq(node.meta["val"], replacement.meta["val"]):
|
| 56 |
+
if fake_tensors_eq(
|
| 57 |
+
node.meta["val"],
|
| 58 |
+
replacement.meta["val"],
|
| 59 |
+
("shape", "device"),
|
| 60 |
+
):
|
| 61 |
+
with graph.inserting_after(node):
|
| 62 |
+
replacement = graph.call_function(
|
| 63 |
+
torch.ops.prims.convert_element_type.default,
|
| 64 |
+
args=(replacement, node.meta["val"].dtype),
|
| 65 |
+
)
|
| 66 |
+
else:
|
| 67 |
+
return
|
| 68 |
+
|
| 69 |
+
node.replace_all_uses_with(replacement)
|
| 70 |
+
replacement.meta.update(node.meta)
|
| 71 |
+
graph.erase_node(node)
|
| 72 |
+
|
| 73 |
+
for node in graph.nodes:
|
| 74 |
+
if node.op != "call_function":
|
| 75 |
+
continue
|
| 76 |
+
|
| 77 |
+
# TODO handle Tensor-Scalar adds, it's a different schema
|
| 78 |
+
if node.target == aten.add.Tensor and len(node.args) == 2:
|
| 79 |
+
if (
|
| 80 |
+
not any(e in zeros for e in node.args)
|
| 81 |
+
or node.kwargs.get("alpha", 1) != 1
|
| 82 |
+
):
|
| 83 |
+
continue
|
| 84 |
+
|
| 85 |
+
replace_index = 1 if node.args[0] in zeros else 0
|
| 86 |
+
replace_no_op(node, replace_index)
|
| 87 |
+
|
| 88 |
+
elif node.target == aten.sub.Tensor and len(node.args) == 2:
|
| 89 |
+
if node.args[1] not in zeros or node.kwargs.get("alpha", 1) != 1:
|
| 90 |
+
continue
|
| 91 |
+
|
| 92 |
+
replace_no_op(node, 0)
|
| 93 |
+
|
| 94 |
+
elif node.target == aten.mul.Tensor and len(node.args) == 2:
|
| 95 |
+
if not any(e in ones for e in node.args):
|
| 96 |
+
continue
|
| 97 |
+
|
| 98 |
+
replace_input_index = 1 if node.args[0] in ones else 0
|
| 99 |
+
replace_no_op(node, replace_input_index)
|
| 100 |
+
|
| 101 |
+
elif (
|
| 102 |
+
node.target == aten.div.Tensor
|
| 103 |
+
and len(node.args) == 2
|
| 104 |
+
and node.args[1] in ones
|
| 105 |
+
):
|
| 106 |
+
replace_no_op(node, 0)
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
@torch.utils._python_dispatch._disable_current_modes()
|
| 110 |
+
def constant_fold_uniform_value(gm):
|
| 111 |
+
"Runs constant folding and replaces constants which can be constructed with a single `full` call. Calls into remove_no_ops."
|
| 112 |
+
aten = torch.ops.aten
|
| 113 |
+
from torch._inductor.freezing import ConstantFolder
|
| 114 |
+
|
| 115 |
+
def is_uniform_valued_tensor(t):
|
| 116 |
+
return t.numel() != 0 and (t == t.flatten()[0]).all()
|
| 117 |
+
|
| 118 |
+
cf = ConstantFolder(gm, insertable_tensor_check=is_uniform_valued_tensor)
|
| 119 |
+
cf.run()
|
| 120 |
+
|
| 121 |
+
node_replacements = cf.node_replacements
|
| 122 |
+
graph = gm.graph
|
| 123 |
+
|
| 124 |
+
zeros = set()
|
| 125 |
+
ones = set()
|
| 126 |
+
|
| 127 |
+
# Got failures in `test_is_set_to_cuda` if we change aliasing on constants,
|
| 128 |
+
# so just constant-ify if a Tensor is unaliased
|
| 129 |
+
constant_data_ptrs = Counter()
|
| 130 |
+
|
| 131 |
+
for constant in node_replacements.values():
|
| 132 |
+
if (
|
| 133 |
+
constant.numel() != 0
|
| 134 |
+
and torch._C._has_storage(constant)
|
| 135 |
+
and constant.layout == torch.strided
|
| 136 |
+
):
|
| 137 |
+
constant_data_ptrs[constant.untyped_storage().data_ptr()] += 1
|
| 138 |
+
|
| 139 |
+
for node, constant in node_replacements.items():
|
| 140 |
+
# Constant folding can leak memory, especially with repeated compilation, so we are only going to
|
| 141 |
+
# remove constants which can be replaced with a constructor.
|
| 142 |
+
|
| 143 |
+
# TODO - we could also Tensors which get replaced with arange here
|
| 144 |
+
if not is_uniform_valued_tensor(constant):
|
| 145 |
+
continue
|
| 146 |
+
|
| 147 |
+
# we dont have a functional way right now of instantiating a non-contiguous tensor with full/zeros/ones right now
|
| 148 |
+
# hasn't shown up to be important yet
|
| 149 |
+
if (
|
| 150 |
+
not constant.is_contiguous(memory_format=torch.contiguous_format)
|
| 151 |
+
or not constant.layout == torch.strided
|
| 152 |
+
):
|
| 153 |
+
continue
|
| 154 |
+
|
| 155 |
+
if (
|
| 156 |
+
torch._C._has_storage(constant)
|
| 157 |
+
and constant_data_ptrs[constant.untyped_storage().data_ptr()] != 1
|
| 158 |
+
):
|
| 159 |
+
continue
|
| 160 |
+
|
| 161 |
+
value = constant.flatten()[0].item()
|
| 162 |
+
|
| 163 |
+
with graph.inserting_after(node):
|
| 164 |
+
# the conversion from tensor and back to value can be lossy, just use the original full ctor value
|
| 165 |
+
if (
|
| 166 |
+
node.op == "call_function"
|
| 167 |
+
and node.target == aten.full.default
|
| 168 |
+
and len(node.args) == 2
|
| 169 |
+
):
|
| 170 |
+
value = node.args[1]
|
| 171 |
+
|
| 172 |
+
# zeros, and ones just get traced into full, so we insert those
|
| 173 |
+
new_node = graph.call_function(
|
| 174 |
+
aten.full.default,
|
| 175 |
+
args=(list(constant.shape), value),
|
| 176 |
+
kwargs={
|
| 177 |
+
"dtype": constant.dtype,
|
| 178 |
+
"layout": torch.strided,
|
| 179 |
+
"device": constant.device,
|
| 180 |
+
"pin_memory": False,
|
| 181 |
+
},
|
| 182 |
+
)
|
| 183 |
+
|
| 184 |
+
new_node.meta.update(node.meta)
|
| 185 |
+
node.replace_all_uses_with(new_node)
|
| 186 |
+
graph.erase_node(node)
|
| 187 |
+
|
| 188 |
+
if value == 0:
|
| 189 |
+
zeros.add(new_node)
|
| 190 |
+
elif value == 1:
|
| 191 |
+
ones.add(new_node)
|
| 192 |
+
|
| 193 |
+
remove_no_ops(gm, zeros, ones)
|
| 194 |
+
|
| 195 |
+
|
| 196 |
+
def joint_graph_passes(graph: torch.fx.GraphModule):
|
| 197 |
+
"""
|
| 198 |
+
Run FX transformations on the joint forwards+backwards graph.
|
| 199 |
+
"""
|
| 200 |
+
lazy_init()
|
| 201 |
+
count = 0
|
| 202 |
+
|
| 203 |
+
if config.joint_graph_constant_folding:
|
| 204 |
+
constant_fold_uniform_value(graph)
|
| 205 |
+
|
| 206 |
+
if config.pattern_matcher:
|
| 207 |
+
count += patterns.apply(graph.graph)
|
| 208 |
+
|
| 209 |
+
if not config.fallback_random:
|
| 210 |
+
count += replace_random_passes(graph)
|
| 211 |
+
|
| 212 |
+
if count:
|
| 213 |
+
stable_topological_sort(graph.graph)
|
| 214 |
+
graph.graph.lint()
|
| 215 |
+
graph.recompile()
|
| 216 |
+
return graph
|
| 217 |
+
|
| 218 |
+
|
| 219 |
+
@register_graph_pattern(
|
| 220 |
+
CallFunction(
|
| 221 |
+
torch.ops.prims.convert_element_type.default,
|
| 222 |
+
CallFunction(
|
| 223 |
+
torch.ops.prims.convert_element_type.default,
|
| 224 |
+
KeywordArg("arg"),
|
| 225 |
+
KeywordArg("dtype1"),
|
| 226 |
+
),
|
| 227 |
+
KeywordArg("dtype2"),
|
| 228 |
+
),
|
| 229 |
+
pass_dict=patterns,
|
| 230 |
+
)
|
| 231 |
+
def pointless_convert(match: Match, arg, dtype1, dtype2):
|
| 232 |
+
"""Remove chain of dtype conversions often created by AMP"""
|
| 233 |
+
graph = match.graph
|
| 234 |
+
node = match.output_node()
|
| 235 |
+
allowed = {torch.float16, torch.bfloat16, torch.float32, torch.float64}
|
| 236 |
+
if dtype1 in allowed and dtype2 in allowed:
|
| 237 |
+
repl = graph.call_function(
|
| 238 |
+
torch.ops.prims.convert_element_type.default, (arg, dtype2)
|
| 239 |
+
)
|
| 240 |
+
repl.meta.update(node.meta)
|
| 241 |
+
node.replace_all_uses_with(repl)
|
| 242 |
+
match.erase_nodes(graph)
|
| 243 |
+
|
| 244 |
+
|
| 245 |
+
@register_graph_pattern(
|
| 246 |
+
CallFunction(torch.ops.aten.view.default, KeywordArg("arg"), KeywordArg("size")),
|
| 247 |
+
pass_dict=patterns,
|
| 248 |
+
)
|
| 249 |
+
def pointless_view(match: Match, arg, size):
|
| 250 |
+
"""Remove no-op view"""
|
| 251 |
+
graph = match.graph
|
| 252 |
+
node = match.output_node()
|
| 253 |
+
arg_size = list(node.args[0].meta["val"].shape)
|
| 254 |
+
if size == arg_size:
|
| 255 |
+
node.replace_all_uses_with(node.args[0])
|
| 256 |
+
match.erase_nodes(graph)
|
llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/mkldnn_fusion.py
ADDED
|
@@ -0,0 +1,1080 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import functools
|
| 2 |
+
import operator
|
| 3 |
+
from functools import reduce
|
| 4 |
+
|
| 5 |
+
import torch
|
| 6 |
+
|
| 7 |
+
from torch.fx.experimental.symbolic_shapes import free_symbols
|
| 8 |
+
|
| 9 |
+
from .. import ir
|
| 10 |
+
|
| 11 |
+
from ..lowering import lowerings as L
|
| 12 |
+
from ..pattern_matcher import (
|
| 13 |
+
Arg,
|
| 14 |
+
CallFunction,
|
| 15 |
+
filter_nodes,
|
| 16 |
+
get_arg_value,
|
| 17 |
+
KeywordArg,
|
| 18 |
+
MULTIPLE,
|
| 19 |
+
)
|
| 20 |
+
from ..virtualized import ops
|
| 21 |
+
from .freezing_patterns import register_freezing_graph_pattern
|
| 22 |
+
from .post_grad import register_lowering_pattern
|
| 23 |
+
from .quantization import (
|
| 24 |
+
_register_quantization_lowerings,
|
| 25 |
+
_register_quantization_weight_pack_pass,
|
| 26 |
+
)
|
| 27 |
+
|
| 28 |
+
if torch._C._has_mkldnn:
|
| 29 |
+
aten = torch.ops.aten
|
| 30 |
+
mkldnn = torch.ops.mkldnn
|
| 31 |
+
prims = torch.ops.prims
|
| 32 |
+
|
| 33 |
+
_conv_args = [Arg() for _ in range(10)]
|
| 34 |
+
_linear_args = [Arg() for _ in range(6)]
|
| 35 |
+
_conv_transpose_args = [Arg() for _ in range(11)]
|
| 36 |
+
|
| 37 |
+
def _conv_call(users=1):
|
| 38 |
+
return CallFunction(
|
| 39 |
+
mkldnn._convolution_pointwise.default, *_conv_args, _users=users
|
| 40 |
+
)
|
| 41 |
+
|
| 42 |
+
def _linear_call(users=1):
|
| 43 |
+
return CallFunction(
|
| 44 |
+
mkldnn._linear_pointwise.default, *_linear_args, _users=users
|
| 45 |
+
)
|
| 46 |
+
|
| 47 |
+
def _conv_transpose_call(users=1):
|
| 48 |
+
return CallFunction(
|
| 49 |
+
mkldnn._convolution_transpose_pointwise.default,
|
| 50 |
+
*_conv_transpose_args,
|
| 51 |
+
_users=users,
|
| 52 |
+
)
|
| 53 |
+
|
| 54 |
+
def _to_float(input_call, users=1):
|
| 55 |
+
return CallFunction(
|
| 56 |
+
prims.convert_element_type.default,
|
| 57 |
+
input_call,
|
| 58 |
+
KeywordArg("to_float"),
|
| 59 |
+
_users=users,
|
| 60 |
+
)
|
| 61 |
+
|
| 62 |
+
def _to_bf16(input_call):
|
| 63 |
+
return CallFunction(
|
| 64 |
+
prims.convert_element_type.default,
|
| 65 |
+
input_call,
|
| 66 |
+
KeywordArg("to_bf16"),
|
| 67 |
+
_users=1,
|
| 68 |
+
)
|
| 69 |
+
|
| 70 |
+
def _unary_fusion_pattern(unary_fusion, call_fn, users, is_bf16):
|
| 71 |
+
# only insert to_dtype if is_bf16 is True
|
| 72 |
+
computation_call = (
|
| 73 |
+
_to_float(call_fn(), users=users) if is_bf16 else call_fn(users=users)
|
| 74 |
+
)
|
| 75 |
+
out = unary_fusion(computation_call)
|
| 76 |
+
return _to_bf16(out) if is_bf16 else out
|
| 77 |
+
|
| 78 |
+
def _gelu_fusion_1(computation_call):
|
| 79 |
+
return CallFunction(
|
| 80 |
+
aten.mul,
|
| 81 |
+
CallFunction(aten.mul, computation_call, 0.5),
|
| 82 |
+
CallFunction(
|
| 83 |
+
aten.add,
|
| 84 |
+
CallFunction(
|
| 85 |
+
aten.erf,
|
| 86 |
+
CallFunction(aten.mul, computation_call, 0.7071067811865476),
|
| 87 |
+
),
|
| 88 |
+
1,
|
| 89 |
+
),
|
| 90 |
+
)
|
| 91 |
+
|
| 92 |
+
def _gelu_fusion_2(computation_call):
|
| 93 |
+
return CallFunction(
|
| 94 |
+
aten.mul,
|
| 95 |
+
CallFunction(aten.mul, computation_call, 0.5),
|
| 96 |
+
CallFunction(
|
| 97 |
+
aten.add,
|
| 98 |
+
CallFunction(
|
| 99 |
+
aten.tanh,
|
| 100 |
+
CallFunction(
|
| 101 |
+
aten.mul,
|
| 102 |
+
CallFunction(
|
| 103 |
+
aten.add,
|
| 104 |
+
computation_call,
|
| 105 |
+
CallFunction(
|
| 106 |
+
aten.mul,
|
| 107 |
+
CallFunction(
|
| 108 |
+
aten.mul,
|
| 109 |
+
CallFunction(
|
| 110 |
+
aten.mul, computation_call, computation_call
|
| 111 |
+
),
|
| 112 |
+
computation_call,
|
| 113 |
+
),
|
| 114 |
+
0.044715,
|
| 115 |
+
),
|
| 116 |
+
),
|
| 117 |
+
0.7978845608028654,
|
| 118 |
+
),
|
| 119 |
+
),
|
| 120 |
+
1,
|
| 121 |
+
),
|
| 122 |
+
)
|
| 123 |
+
|
| 124 |
+
def _hardswish_fusion(computation_call):
|
| 125 |
+
return CallFunction(
|
| 126 |
+
aten.div,
|
| 127 |
+
CallFunction(
|
| 128 |
+
aten.mul,
|
| 129 |
+
computation_call,
|
| 130 |
+
CallFunction(
|
| 131 |
+
aten.clamp_max,
|
| 132 |
+
CallFunction(
|
| 133 |
+
aten.clamp_min, CallFunction(aten.add, computation_call, 3), 0
|
| 134 |
+
),
|
| 135 |
+
6,
|
| 136 |
+
),
|
| 137 |
+
),
|
| 138 |
+
6,
|
| 139 |
+
)
|
| 140 |
+
|
| 141 |
+
def _silu_fusion(computation_call):
|
| 142 |
+
return CallFunction(
|
| 143 |
+
aten.mul, computation_call, CallFunction(aten.sigmoid, computation_call)
|
| 144 |
+
)
|
| 145 |
+
|
| 146 |
+
def _hardsigmoid_fusion(computation_call):
|
| 147 |
+
return CallFunction(
|
| 148 |
+
aten.div,
|
| 149 |
+
CallFunction(
|
| 150 |
+
aten.clamp_max,
|
| 151 |
+
CallFunction(
|
| 152 |
+
aten.clamp_min, CallFunction(aten.add, computation_call, 3), 0
|
| 153 |
+
),
|
| 154 |
+
6,
|
| 155 |
+
),
|
| 156 |
+
6,
|
| 157 |
+
)
|
| 158 |
+
|
| 159 |
+
def _leaky_relu_fusion(computation_call):
|
| 160 |
+
return CallFunction(
|
| 161 |
+
aten.where,
|
| 162 |
+
CallFunction(aten.gt, computation_call, 0),
|
| 163 |
+
computation_call,
|
| 164 |
+
CallFunction(aten.mul, computation_call, KeywordArg("negative_slope")),
|
| 165 |
+
)
|
| 166 |
+
|
| 167 |
+
def _hardtanh_fusion(computation_call):
|
| 168 |
+
return CallFunction(
|
| 169 |
+
aten.clamp_max,
|
| 170 |
+
CallFunction(aten.clamp_min, computation_call, KeywordArg("min_value")),
|
| 171 |
+
KeywordArg("max_value"),
|
| 172 |
+
)
|
| 173 |
+
|
| 174 |
+
def _combined_fusion(computation_call, elementwise_op):
|
| 175 |
+
return CallFunction(elementwise_op, computation_call)
|
| 176 |
+
|
| 177 |
+
# binary_op(other, computation_op)
|
| 178 |
+
def _binary_fusion_v1(computation_call, binary_fn):
|
| 179 |
+
return CallFunction(binary_fn, KeywordArg("other"), computation_call)
|
| 180 |
+
|
| 181 |
+
# binary_op(computation_op, other)
|
| 182 |
+
def _binary_fusion_v2(computation_call, binary_fn):
|
| 183 |
+
return CallFunction(binary_fn, computation_call, KeywordArg("other"))
|
| 184 |
+
|
| 185 |
+
def _is_single_computation_op(computation_op):
|
| 186 |
+
def fn(match):
|
| 187 |
+
computation_nodes = filter_nodes(match.nodes, computation_op)
|
| 188 |
+
if len(computation_nodes) < 1:
|
| 189 |
+
return False
|
| 190 |
+
if any(n.args[-3] != "none" for n in computation_nodes):
|
| 191 |
+
return False
|
| 192 |
+
return True
|
| 193 |
+
|
| 194 |
+
return fn
|
| 195 |
+
|
| 196 |
+
def _is_valid_computation_unary_fusion(computation_op, is_bf16=False):
|
| 197 |
+
def fn(match):
|
| 198 |
+
matched = _is_single_computation_op(computation_op)(match)
|
| 199 |
+
computation_node = filter_nodes(match.nodes, computation_op)[0]
|
| 200 |
+
if is_bf16:
|
| 201 |
+
conversion_dtype_nodes = filter_nodes(
|
| 202 |
+
match.nodes, prims.convert_element_type.default
|
| 203 |
+
)
|
| 204 |
+
if len(conversion_dtype_nodes) != 2:
|
| 205 |
+
return False
|
| 206 |
+
# fusion pattern is always in the form of computation_op + to_float32 + unary_op + to_bfloat16
|
| 207 |
+
if computation_node == conversion_dtype_nodes[0].args[0]:
|
| 208 |
+
to_float = conversion_dtype_nodes[0].args[1]
|
| 209 |
+
to_bf16 = conversion_dtype_nodes[1].args[1]
|
| 210 |
+
else:
|
| 211 |
+
to_float = conversion_dtype_nodes[1].args[1]
|
| 212 |
+
to_bf16 = conversion_dtype_nodes[0].args[1]
|
| 213 |
+
matched = (
|
| 214 |
+
matched and to_float == torch.float and to_bf16 == torch.bfloat16
|
| 215 |
+
)
|
| 216 |
+
return matched
|
| 217 |
+
|
| 218 |
+
return fn
|
| 219 |
+
|
| 220 |
+
def _register_unary_fusion_lowering(
|
| 221 |
+
pattern, unary_attr, computation_op, is_bf16=False
|
| 222 |
+
):
|
| 223 |
+
@register_lowering_pattern(
|
| 224 |
+
pattern,
|
| 225 |
+
extra_check=_is_valid_computation_unary_fusion(computation_op, is_bf16),
|
| 226 |
+
)
|
| 227 |
+
def fn(match, *args, **kwargs):
|
| 228 |
+
computation_args = list(args)[:-3] + [
|
| 229 |
+
unary_attr.op_name,
|
| 230 |
+
unary_attr.scalars_attr,
|
| 231 |
+
unary_attr.algorithm_attr,
|
| 232 |
+
]
|
| 233 |
+
return L[computation_op](*computation_args)
|
| 234 |
+
|
| 235 |
+
return fn
|
| 236 |
+
|
| 237 |
+
def _register_leaky_relu_fusion_lowering(pattern, computation_op, is_bf16=False):
|
| 238 |
+
@register_lowering_pattern(
|
| 239 |
+
pattern, extra_check=_is_single_computation_op(computation_op)
|
| 240 |
+
)
|
| 241 |
+
def fn(match, *args, **kwargs):
|
| 242 |
+
negative_slope = kwargs.get("negative_slope")
|
| 243 |
+
if isinstance(negative_slope, ir.TensorBox):
|
| 244 |
+
matched = False
|
| 245 |
+
else: # inp is a Number
|
| 246 |
+
matched = True
|
| 247 |
+
if is_bf16:
|
| 248 |
+
dtype1 = kwargs.get("to_float")
|
| 249 |
+
dtype2 = kwargs.get("to_bf16")
|
| 250 |
+
matched = matched and dtype1 == torch.float and dtype2 == torch.bfloat16
|
| 251 |
+
computation_args = list(args)
|
| 252 |
+
if matched:
|
| 253 |
+
computation_args = computation_args[:-3] + [
|
| 254 |
+
"leaky_relu",
|
| 255 |
+
[negative_slope],
|
| 256 |
+
"",
|
| 257 |
+
]
|
| 258 |
+
return L[computation_op](*computation_args)
|
| 259 |
+
else:
|
| 260 |
+
# computation_args += ["none", [], ""]
|
| 261 |
+
out = L[computation_op](*computation_args)
|
| 262 |
+
if is_bf16:
|
| 263 |
+
out = L[prims.convert_element_type.default](out, dtype=torch.float)
|
| 264 |
+
out = L[aten.where](
|
| 265 |
+
L[aten.gt](out, 0),
|
| 266 |
+
out,
|
| 267 |
+
L[aten.mul](out, negative_slope),
|
| 268 |
+
)
|
| 269 |
+
if is_bf16:
|
| 270 |
+
out = L[prims.convert_element_type.default](
|
| 271 |
+
out, dtype=torch.bfloat16
|
| 272 |
+
)
|
| 273 |
+
return out
|
| 274 |
+
|
| 275 |
+
return fn
|
| 276 |
+
|
| 277 |
+
def _register_hardtanh_fusion_lowering(pattern, computation_op, is_bf16=False):
|
| 278 |
+
@register_lowering_pattern(
|
| 279 |
+
pattern, extra_check=_is_single_computation_op(computation_op)
|
| 280 |
+
)
|
| 281 |
+
def fn(match, *args, **kwargs):
|
| 282 |
+
min_value = kwargs.get("min_value")
|
| 283 |
+
max_value = kwargs.get("max_value")
|
| 284 |
+
if isinstance(min_value, ir.TensorBox) or isinstance(
|
| 285 |
+
max_value, ir.TensorBox
|
| 286 |
+
):
|
| 287 |
+
matched = False
|
| 288 |
+
else: # inp is a Number
|
| 289 |
+
matched = min_value <= max_value
|
| 290 |
+
if is_bf16:
|
| 291 |
+
dtype1 = kwargs.get("to_float")
|
| 292 |
+
dtype2 = kwargs.get("to_bf16")
|
| 293 |
+
matched = matched and dtype1 == torch.float and dtype2 == torch.bfloat16
|
| 294 |
+
computation_args = list(args)
|
| 295 |
+
if matched:
|
| 296 |
+
computation_args = computation_args[:-3] + [
|
| 297 |
+
"hardtanh",
|
| 298 |
+
[min_value, max_value],
|
| 299 |
+
"",
|
| 300 |
+
]
|
| 301 |
+
return L[computation_op](*computation_args)
|
| 302 |
+
else:
|
| 303 |
+
out = L[computation_op](*computation_args)
|
| 304 |
+
if is_bf16:
|
| 305 |
+
out = L[prims.convert_element_type.default](out, dtype=torch.float)
|
| 306 |
+
out = L[aten.clamp_max](L[aten.clamp_min](out, min_value), max_value)
|
| 307 |
+
if is_bf16:
|
| 308 |
+
out = L[prims.convert_element_type.default](
|
| 309 |
+
out, dtype=torch.bfloat16
|
| 310 |
+
)
|
| 311 |
+
return out
|
| 312 |
+
|
| 313 |
+
return fn
|
| 314 |
+
|
| 315 |
+
_binary_attr = {
|
| 316 |
+
aten.add: "add",
|
| 317 |
+
ops.add: "add",
|
| 318 |
+
aten.sub: "sub",
|
| 319 |
+
ops.sub: "sub",
|
| 320 |
+
}
|
| 321 |
+
|
| 322 |
+
def _is_valid_binary(match, fn):
    """Check that the binary node(s) matching ``fn`` in ``match`` are
    fusible: tensor/tensor operands with identical size, device and dtype,
    ``alpha`` equal to 1 (or unset), and distinct operands."""
    binary_nodes = filter_nodes(match.nodes, fn)
    if len(binary_nodes) < 1:
        return False
    # Both operands must be real tensors (scalar operands cannot be fused).
    if any(
        not (
            hasattr(n.args[0], "meta")
            and isinstance(n.args[0].meta.get("val", None), torch.Tensor)
        )
        or not (
            hasattr(n.args[1], "meta")
            and isinstance(n.args[1].meta.get("val", None), torch.Tensor)
        )
        for n in binary_nodes
    ):
        return False
    # check alpha is one.
    if any(
        get_arg_value(n, 2, kwarg_name="alpha") != 1.0
        and get_arg_value(n, 2, kwarg_name="alpha") is not None
        for n in binary_nodes
    ):
        return False
    # The fused kernels require matching shapes (no broadcasting), devices
    # and dtypes on both sides.
    if any(
        n.args[0].meta["val"].size() != n.args[1].meta["val"].size()
        or n.args[0].meta["val"].device != n.args[1].meta["val"].device
        or n.args[0].meta["val"].dtype != n.args[1].meta["val"].dtype
        for n in binary_nodes
    ):
        return False
    # check args[0] and args[1] is not same
    if any(n.args[0] == n.args[1] for n in binary_nodes):
        return False
    return True
|
| 356 |
+
|
| 357 |
+
def _is_valid_computation_binary(computation_op, binary_op, other_index=None):
    """Build an extra-check predicate requiring that the match contain a
    single ``computation_op`` plus a fusible ``binary_op``.

    ``other_index`` is accepted for signature compatibility and unused here.
    """

    def fn(match):
        return _is_single_computation_op(computation_op)(
            match
        ) and _is_valid_binary(match, binary_op)

    return fn
|
| 366 |
+
|
| 367 |
+
def _is_valid_computation_binary_inplace(computation_op, binary_op, other_index):
    """Like ``_is_valid_computation_binary`` but additionally require that
    the "other" operand (at ``other_index``) may safely be overwritten in
    place: single user, and not a graph input/output."""

    def fn(match):
        if not _is_valid_computation_binary(computation_op, binary_op)(match):
            return False
        binary_nodes = filter_nodes(match.nodes, binary_op)
        # An in-place write would be observable through any extra user.
        if any(len(n.args[other_index].users) > 1 for n in binary_nodes):
            return False
        # Never mutate graph inputs or outputs.
        if any(
            n.args[other_index].op in ["placeholder", "output"]
            for n in binary_nodes
        ):
            return False
        return True

    return fn
|
| 382 |
+
|
| 383 |
+
def _register_binary_unary_fusion_lowering(
    pattern,
    computation_op,
    binary_op,
    fusion_op,
    unary_attr=None,
):
    """Register a lowering that replaces computation+binary (optionally
    followed by a unary described by ``unary_attr``) with ``fusion_op``."""

    @register_lowering_pattern(
        pattern, extra_check=_is_valid_computation_binary(computation_op, binary_op)
    )
    def fn(match, *args, **kwargs):
        other = kwargs.get("other")
        assert isinstance(other, ir.TensorBox)
        binary_attr = _binary_attr[binary_op]
        args_list = list(args)
        # Splice "other" in after the input and replace the original trailing
        # (attr, scalars, algorithm) triple with the binary attribute.
        computation_args = [args_list[0], other] + args_list[1:-3] + [binary_attr]
        if len(args_list) > 6:
            # Longer (conv-style) signatures also take alpha plus the unary
            # post-op description; use placeholders when no unary is fused.
            if unary_attr is not None:
                computation_args += [
                    1.0,
                    unary_attr.op_name,
                    unary_attr.scalars_attr,
                    unary_attr.algorithm_attr,
                ]
            else:
                computation_args += [1.0, None, [], None]
        return L[fusion_op](*computation_args)

    return fn
|
| 412 |
+
|
| 413 |
+
def _register_binary_unary_maybe_inplace_fusion_lowering(
    pattern,
    computation_op,
    binary_op,
    inplace_fusion_op,
    outplace_fusion_op,
    unary_attr=None,
    other_index=None,
):
    """Register a lowering that replaces computation+binary(+unary) with the
    in-place fused op when the "other" operand can be overwritten, falling
    back to the out-of-place fused op otherwise."""

    @register_lowering_pattern(
        pattern,
        extra_check=_is_valid_computation_binary_inplace(
            computation_op, binary_op, other_index
        ),
    )
    def fn(match, *args, **kwargs):
        other = kwargs.get("other")
        assert isinstance(other, ir.TensorBox)
        binary_attr = _binary_attr[binary_op]
        args_list = list(args)
        # Same argument rewrite as the out-of-place binary fusion lowering.
        computation_args = [args_list[0], other] + args_list[1:-3] + [binary_attr]
        if len(args_list) > 6:
            if unary_attr is not None:
                computation_args += [
                    1.0,
                    unary_attr.op_name,
                    unary_attr.scalars_attr,
                    unary_attr.algorithm_attr,
                ]
            else:
                computation_args += [1.0, None, [], None]
        # Make sure the other is not an alias or mutation(fx side doesn't has such info).
        other.realize()
        can_be_inplace = not (
            isinstance(other.data, ir.ReinterpretView)
            or isinstance(other.get_layout(), (ir.MutationLayout, ir.AliasedLayout))
        )
        if not can_be_inplace:
            return L[outplace_fusion_op](*computation_args)
        return L[inplace_fusion_op](*computation_args)

    return fn
|
| 455 |
+
|
| 456 |
+
# Fusible mkldnn computation ops; the index order (conv, linear,
# conv_transpose) is relied upon by the registration helpers below.
computation_ops = [
    mkldnn._convolution_pointwise.default,
    mkldnn._linear_pointwise.default,
    mkldnn._convolution_transpose_pointwise.default,
]
|
| 461 |
+
|
| 462 |
+
class UnaryAttr:
    """Bundle of mkldnn unary post-op attributes.

    Holds the post-op name, its scalar arguments (defaults to an empty
    list) and its algorithm string (defaults to the empty string).
    """

    def __init__(self, op_name: str, scalars_attr=None, algorithm_attr=None):
        self.op_name = op_name
        # Normalize falsy values to the canonical empty defaults without
        # putting mutable objects in the signature.
        self.scalars_attr = [] if not scalars_attr else scalars_attr
        self.algorithm_attr = "" if not algorithm_attr else algorithm_attr
|
| 467 |
+
|
| 468 |
+
def _register_unary_fusion():
    """Register all computation+unary fusion lowerings (gelu, hardswish,
    hardsigmoid, swish, leaky_relu, hardtanh — plus relu/sigmoid/tanh in the
    fp32 path) for conv, linear and conv_transpose, in fp32 and bf16
    variants."""
    computation_call_fns = [_conv_call, _linear_call, _conv_transpose_call]

    def _unary_fusion_patterns(is_bf16):
        # Map each unary post-op to one pattern per computation kind.
        # NOTE(review): the integer passed to _unary_fusion_pattern appears
        # to size the decomposed unary pattern (user count) — confirm against
        # _unary_fusion_pattern's definition.
        replacement_unary_fusion_patterns = {
            UnaryAttr("gelu", algorithm_attr="tanh"): [
                _unary_fusion_pattern(_gelu_fusion_2, call_fn, 4, is_bf16)
                for call_fn in computation_call_fns
            ],
            UnaryAttr("gelu", algorithm_attr="none"): [
                _unary_fusion_pattern(_gelu_fusion_1, call_fn, 2, is_bf16)
                for call_fn in computation_call_fns
            ],
            UnaryAttr("hardswish"): [
                _unary_fusion_pattern(_hardswish_fusion, call_fn, 2, is_bf16)
                for call_fn in computation_call_fns
            ],
            UnaryAttr("hardsigmoid"): [
                _unary_fusion_pattern(_hardsigmoid_fusion, call_fn, 1, is_bf16)
                for call_fn in computation_call_fns
            ],
            UnaryAttr("swish"): [
                _unary_fusion_pattern(_silu_fusion, call_fn, 2, is_bf16)
                for call_fn in computation_call_fns
            ],
        }
        if not is_bf16:
            # relu/sigmoid/tanh are matched as single aten nodes and only
            # registered on the fp32 path.
            call_user1 = [call_fn(users=1) for call_fn in computation_call_fns]
            replacement_unary_fusion_patterns.update(
                {
                    UnaryAttr("relu"): [
                        _combined_fusion(u, aten.relu) for u in call_user1
                    ],
                    UnaryAttr("sigmoid"): [
                        _combined_fusion(u, aten.sigmoid) for u in call_user1
                    ],
                    UnaryAttr("tanh"): [
                        _combined_fusion(u, aten.tanh) for u in call_user1
                    ],
                }
            )

        return replacement_unary_fusion_patterns

    for is_bf16 in [True, False]:
        replace_patterns = _unary_fusion_patterns(is_bf16)
        for unary_attr, patterns in replace_patterns.items():
            # patterns[i] pairs with computation_ops[i] (conv, linear,
            # conv_transpose).
            _register_unary_fusion_lowering(
                patterns[0], unary_attr, computation_ops[0], is_bf16
            )
            _register_unary_fusion_lowering(
                patterns[1], unary_attr, computation_ops[1], is_bf16
            )
            _register_unary_fusion_lowering(
                patterns[2], unary_attr, computation_ops[2], is_bf16
            )
        # leaky_relu and hardtanh carry scalar attributes, so they go
        # through dedicated registration helpers.
        _leaky_relu_patterns = [
            _unary_fusion_pattern(_leaky_relu_fusion, call_fn, 3, is_bf16)
            for call_fn in computation_call_fns
        ]
        for pattern, computation_op in zip(_leaky_relu_patterns, computation_ops):
            _register_leaky_relu_fusion_lowering(pattern, computation_op, is_bf16)
        hardtanh_patterns = [
            _unary_fusion_pattern(_hardtanh_fusion, call_fn, 1, is_bf16)
            for call_fn in computation_call_fns
        ]
        for pattern, computation_op in zip(hardtanh_patterns, computation_ops):
            _register_hardtanh_fusion_lowering(pattern, computation_op, is_bf16)
|
| 536 |
+
|
| 537 |
+
def _register_inplace_fusion():
    """Register conv+add(+relu) lowerings that prefer the in-place binary
    fused conv op, falling back to the out-of-place op when the "other"
    operand cannot be safely overwritten."""
    binary_ops = [aten.add, ops.add]
    inplace_fusion_op = mkldnn._convolution_pointwise_.binary
    outplace_fusion_op = mkldnn._convolution_pointwise.binary
    conv_call = _conv_call(users=1)
    conv_op = computation_ops[0]
    for binary_op in binary_ops:
        # v1 and v2 patterns differ in operand order; other_index points at
        # the non-conv operand in each (0 for v1, 1 for v2).
        binary_v1 = _binary_fusion_v1(conv_call, binary_op)
        binary_unary_v1 = _combined_fusion(binary_v1, aten.relu)
        _register_binary_unary_maybe_inplace_fusion_lowering(
            binary_unary_v1,
            conv_op,
            binary_op,
            inplace_fusion_op,
            outplace_fusion_op,
            other_index=0,
            unary_attr=UnaryAttr("relu"),
        )
        _register_binary_unary_maybe_inplace_fusion_lowering(
            binary_v1,
            conv_op,
            binary_op,
            inplace_fusion_op,
            outplace_fusion_op,
            other_index=0,
        )
        binary_v2 = _binary_fusion_v2(conv_call, binary_op)
        binary_unary_v2 = _combined_fusion(binary_v2, aten.relu)
        _register_binary_unary_maybe_inplace_fusion_lowering(
            binary_unary_v2,
            conv_op,
            binary_op,
            inplace_fusion_op,
            outplace_fusion_op,
            other_index=1,
            unary_attr=UnaryAttr("relu"),
        )
        _register_binary_unary_maybe_inplace_fusion_lowering(
            binary_v2,
            conv_op,
            binary_op,
            inplace_fusion_op,
            outplace_fusion_op,
            other_index=1,
        )
|
| 582 |
+
|
| 583 |
+
def _register_binary_fusion():
    """Register out-of-place computation+binary lowerings for conv and
    linear.  add is registered in both operand orders (v1 and v2); sub only
    via the v2 pattern."""
    binary_ops = [aten.add, ops.add, aten.sub, ops.sub]
    fusion_ops = [
        mkldnn._convolution_pointwise.binary,
        mkldnn._linear_pointwise.binary,
    ]
    # conv_transpose (last entry of computation_ops) has no binary fusion.
    _computation_user_1 = [_conv_call(users=1), _linear_call(users=1)]
    for computation_call, computation_op, fusion_op in zip(
        _computation_user_1, computation_ops[:-1], fusion_ops
    ):
        for binary_op in binary_ops:
            pattern = _binary_fusion_v2(computation_call, binary_op)
            _register_binary_unary_fusion_lowering(
                pattern, computation_op, binary_op, fusion_op
            )

        for binary_op in [aten.add, ops.add]:
            pattern = _binary_fusion_v1(computation_call, binary_op)
            _register_binary_unary_fusion_lowering(
                pattern, computation_op, binary_op, fusion_op
            )
|
| 604 |
+
|
| 605 |
+
def _register_binary_unary_fusion():
    """Register out-of-place conv+binary+relu lowerings.  add is registered
    in both operand orders; sub only via the v2 pattern."""
    binary_ops = [aten.add, ops.add, aten.sub, ops.sub]
    fusion_ops = [mkldnn._convolution_pointwise.binary]
    _computation_user_1 = [_conv_call(users=1)]
    for computation_call, computation_op, fusion_op in zip(
        _computation_user_1, computation_ops[:-1], fusion_ops
    ):
        for binary_op in binary_ops:
            pattern_v1 = _combined_fusion(
                _binary_fusion_v2(computation_call, binary_op), aten.relu
            )
            _register_binary_unary_fusion_lowering(
                pattern_v1,
                computation_op,
                binary_op,
                fusion_op,
                unary_attr=UnaryAttr("relu"),
            )
        for binary_op in [aten.add, ops.add]:
            pattern_v2 = _combined_fusion(
                _binary_fusion_v1(computation_call, binary_op), aten.relu
            )
            _register_binary_unary_fusion_lowering(
                pattern_v2,
                computation_op,
                binary_op,
                fusion_op,
                unary_attr=UnaryAttr("relu"),
            )
|
| 634 |
+
|
| 635 |
+
def _recover_linear():
    """Register passes that normalize the graph so linear fusion can apply:
    collapse reshape+linear+reshape into one linear, and fold a following
    add of a 1-D bias into the linear itself."""

    # convert reshape+linear+reshape to a single linear for applying fusion path.
    @register_freezing_graph_pattern(
        CallFunction(
            aten.reshape.default,
            CallFunction(
                mkldnn._linear_pointwise.default,
                CallFunction(
                    aten.reshape.default,
                    Arg(),
                    KeywordArg("reshape_1"),
                    _users=MULTIPLE,
                ),
                Arg(),
                Arg(),
                Arg(),
                Arg(),
                Arg(),
            ),
            KeywordArg("reshape_2"),
        ),
        pass_number=1,
    )
    def reshape_linear_reshape_pattern(match, *args, **kwargs):
        reshape_1 = kwargs.get("reshape_1")
        reshape_2 = kwargs.get("reshape_2")
        assert len(reshape_1) == 2
        # Any non-int dimension means symbolic (dynamic) shapes.
        dynamic_shapes = not all(
            isinstance(x, int) for x in ([reshape_1[0]] + reshape_2[:-1])
        )

        graph = match.graph
        reshape_2_node = match.output_node()
        # outer reshape -> linear -> inner reshape -> original linear input.
        linear_input_node = reshape_2_node.args[0].args[0].args[0]
        # check linear's input's shape[:-1] == reshape_2[:-1]
        # and check product(reshape_2[:-1]) == reshape_1[0]
        if dynamic_shapes:
            # TODO: Haozhe investigate how add guard here
            return
        else:
            can_remove_reshape = linear_input_node.meta.get("val").shape[
                :-1
            ] == torch.Size(reshape_2[:-1])
            can_remove_reshape = can_remove_reshape and (
                reduce(lambda x, y: x * y, reshape_2[:-1]) == reshape_1[0]
            )

        if can_remove_reshape:
            repl = graph.call_function(mkldnn._linear_pointwise.default, args)
            repl.meta.update(reshape_2_node.meta)
            reshape_2_node.replace_all_uses_with(repl)
            old_linear_node = reshape_2_node.args[0]
            reshape_1_node = old_linear_node.args[0]
            graph.erase_node(reshape_2_node)
            graph.erase_node(old_linear_node)
            # The inner reshape may feed other users; drop it only when dead.
            if len(reshape_1_node.users) == 0:
                graph.erase_node(reshape_1_node)

    def is_linear_add_bias(match):
        # The add's second operand qualifies as a bias only when the linear
        # has no bias yet and the operand is 1-D with size matching the
        # weight's dim-0 size.
        add_node = match.output_node()
        linear_node = add_node.args[0]
        weight_meta = linear_node.args[1].meta.get("val")
        bias_meta = add_node.args[1].meta.get("val")
        if weight_meta is None or bias_meta is None:
            return False
        return (
            linear_node.args[2] is None
            and bias_meta.dim() == 1
            and bias_meta.size(0) == weight_meta.size(0)
        )

    # convert linear+bias to a single linear for applying fusion path.
    @register_freezing_graph_pattern(
        CallFunction(
            aten.add.Tensor,
            CallFunction(mkldnn._linear_pointwise.default, *_linear_args),
            Arg(),
        ),
        pass_number=1,
        extra_check=is_linear_add_bias,
    )
    def linear_bias_pattern(match, *args):
        graph = match.graph
        add_node = match.output_node()
        linear_node = add_node.args[0]
        new_args = list(linear_node.args)
        # Slot the add's operand in as the linear's bias argument.
        new_args[2] = add_node.args[1]
        repl = graph.call_function(
            mkldnn._linear_pointwise.default, tuple(new_args)
        )
        repl.meta.update(add_node.meta)
        add_node.replace_all_uses_with(repl)
        match.erase_nodes(graph)
|
| 728 |
+
|
| 729 |
+
def _is_packable_mkldnn_rnn_layer(match):
    """Check whether an aten.mkldnn_rnn_layer node qualifies for weight
    prepacking: constant weights, meta info available, CPU tensors, and
    bf16 only when the platform supports it."""
    lstm_node = match.output_node()
    POS_WEIGHTS = [1, 2]
    POS_INPUTS = [0, 5, 6]
    POS_ARGS = POS_WEIGHTS + POS_INPUTS
    # Weights should be Constant
    if any(
        lstm_node.args[POS_WEIGHT].op != "get_attr" for POS_WEIGHT in POS_WEIGHTS
    ):
        return False

    # Meta info for weights and inputs should be available
    if any(lstm_node.args[POS_ARG].meta.get("val") is None for POS_ARG in POS_ARGS):
        return False

    # Check device
    if any(
        lstm_node.args[POS_ARG].meta.get("val").device.type != "cpu"
        for POS_ARG in POS_ARGS
    ):
        return False

    # Check dtype
    if any(
        lstm_node.args[POS_ARG].meta.get("val").dtype == torch.bfloat16
        and not mkldnn._is_mkldnn_bf16_supported()
        for POS_ARG in POS_ARGS
    ):
        return False

    return True
|
| 760 |
+
|
| 761 |
+
def _is_packable_convolution(match):
    """
    Check if the node is supported for MKLDNN convolution.
    """
    conv_node = match.output_node()
    input_meta_value = conv_node.args[0].meta.get("val")
    weight_meta_value = conv_node.args[1].meta.get("val")
    if input_meta_value is None or weight_meta_value is None:
        return False
    input_size = input_meta_value.shape
    # Weight must be a frozen constant so it can be prepacked.
    if conv_node.args[1].op != "get_attr":
        return False
    # Only 4-D CPU tensors (2-D convolution) are handled.
    for meta_value in [input_meta_value, weight_meta_value]:
        if (
            meta_value is None
            or meta_value.device.type != "cpu"
            or meta_value.dim() != 4
        ):
            return False
    if (
        input_meta_value.dtype == torch.bfloat16
        or weight_meta_value.dtype == torch.bfloat16
    ):
        # bf16 path requires platform support.
        if not mkldnn._is_mkldnn_bf16_supported():
            return False
    is_transposed = conv_node.args[-3]
    if is_transposed:
        # TODO: Support dynamic shape case for MKLDNN conv transpose.
        if free_symbols(input_size):
            return False
        groups = conv_node.args[-1]
        in_channels = weight_meta_value.size(0)
        # doesn't support group_depthwise_conv_transpose.
        if groups > 1 and groups == in_channels:
            return False
        # Port from: aten/src/ATen/native/Convolution.cpp:is_output_padding_big
        output_paddings = conv_node.args[-2]
        strides = conv_node.args[3]
        if any(
            output_padding >= stride
            for output_padding, stride in zip(output_paddings, strides)
        ):
            return False
    return True
|
| 805 |
+
|
| 806 |
+
def _is_packable_linear(match):
    """
    Check if the node is supported for MKLDNN linear.

    Requires a constant (get_attr) weight, available meta tensors, 2-D CPU
    input/weight, MKL for the fp32 static-shape path, a well-formed bias for
    addmm, and platform bf16 support when bf16 dtypes are involved.
    """
    linear_node = match.output_node()
    # weight_idx is 1 for aten.mm and is 2 for aten.addmm
    weight_idx = 2 if linear_node.target == aten.addmm.default else 1
    # Weight must be a frozen constant so it can be prepacked.
    if linear_node.args[weight_idx].op != "get_attr":
        return False
    input_meta_value = linear_node.args[weight_idx - 1].meta.get("val")
    weight_meta_value = linear_node.args[weight_idx].meta.get("val")
    if input_meta_value is None or weight_meta_value is None:
        return False
    batch_size = input_meta_value.shape[0]
    is_bf16_weight = weight_meta_value.dtype == torch.bfloat16
    # for fp32, mkl should be enabled and batch_size should not be a free symbol.
    if not is_bf16_weight and (free_symbols(batch_size) or (not torch._C.has_mkl)):
        return False
    # Input and weight must be 2-D CPU tensors.
    for meta_value in [input_meta_value, weight_meta_value]:
        if (
            meta_value is None
            or meta_value.device.type != "cpu"
            or meta_value.dim() != 2
        ):
            return False
    if weight_idx == 2:
        # addmm: also validate the bias (args[0]).
        bias_meta_value = linear_node.args[0].meta.get("val")
        if (
            bias_meta_value is None
            # Bug fix: this previously tested the stale loop variable
            # ``meta_value`` (the weight, already known to be on CPU)
            # instead of the bias tensor itself.
            or bias_meta_value.device.type != "cpu"
            or bias_meta_value.dim() != 1
            or bias_meta_value.size(0) != weight_meta_value.size(1)
        ):
            return False

    if (
        input_meta_value.dtype == torch.bfloat16
        or weight_meta_value.dtype == torch.bfloat16
    ):
        # bf16 path requires platform support.
        if not mkldnn._is_mkldnn_bf16_supported():
            return False
    return True
|
| 848 |
+
|
| 849 |
+
# Positional argument template for aten.convolution.default; only
# ``is_transposed`` is captured by name because the replacement callback
# branches on it.
_aten_conv_args = (
    Arg(),
    Arg(),
    Arg(),
    Arg(),
    Arg(),
    Arg(),
    KeywordArg("is_transposed"),
    Arg(),
    Arg(),
)
|
| 860 |
+
|
| 861 |
+
# Positional argument template for aten.mkldnn_rnn_layer.default; only
# ``reverse`` is captured by name because the replacement callback needs it
# when rebuilding the packed call.
_aten_mkldnn_rnn_layer_args = (
    Arg(),  # input
    Arg(),  # weight0
    Arg(),  # weight1
    Arg(),  # weight2
    Arg(),  # weight3
    Arg(),  # hx_
    Arg(),  # cx_
    KeywordArg("reverse"),  # reverse
    Arg(),  # batch_sizes
    Arg(),  # mode
    Arg(),  # hidden_size
    Arg(),  # num_layers
    Arg(),  # has_biases
    Arg(),  # bidirectional
    Arg(),  # batch_first
    Arg(),  # train
)
|
| 879 |
+
|
| 880 |
+
def _register_weight_pack_pass():
    """Register freezing-graph patterns that replace aten convolution,
    mkldnn_rnn_layer and mm/addmm nodes with mkldnn/mkl ops operating on
    prepacked (reordered) weights."""

    @register_freezing_graph_pattern(
        CallFunction(aten.convolution.default, *_aten_conv_args),
        extra_check=_is_packable_convolution,
    )
    def convolution(match, *args, **kwargs):
        is_transposed = kwargs.get("is_transposed")
        assert isinstance(is_transposed, bool)
        graph = match.graph
        conv_node = match.output_node()
        input_size = conv_node.args[0].meta.get("val").shape
        with graph.inserting_before(conv_node):
            # Reorder the aten argument list into the layout the mkldnn op
            # expects (presumably padding/stride/dilation/groups — confirm
            # against the mkldnn op schema).
            constant_args = [args[4], args[3], args[5], args[-1]]
            packed_weight_op = mkldnn._reorder_convolution_weight
            packed_conv_op = mkldnn._convolution_pointwise.default
            if is_transposed:
                constant_args.insert(1, args[-2])  # output_padding
                packed_weight_op = mkldnn._reorder_convolution_transpose_weight
                packed_conv_op = mkldnn._convolution_transpose_pointwise.default
            if not free_symbols(input_size):
                # Static shapes: prepack the weight at freeze time.
                packed_weight_inputs = (
                    (args[1],) + tuple(constant_args) + (input_size,)
                )
                packed_weight_node = graph.create_node(
                    "call_function", packed_weight_op, args=packed_weight_inputs
                )
            else:
                assert not is_transposed
                # For dynamic shape case, we need to pack weight in runtime.
                packed_weight_node = args[1]
            # Trailing ("none", [], "") is the no-op unary post-op triple.
            packed_conv_inputs = (
                (args[0], packed_weight_node, args[2])
                + tuple(constant_args)
                + ("none", [], "")
            )
            packed_conv_node = graph.create_node(
                "call_function", packed_conv_op, tuple(packed_conv_inputs)
            )
            conv_node.replace_all_uses_with(packed_conv_node)
            packed_conv_node.meta.update(conv_node.meta)
            graph.erase_node(conv_node)

    @register_freezing_graph_pattern(
        CallFunction(aten.mkldnn_rnn_layer.default, *_aten_mkldnn_rnn_layer_args),
        extra_check=_is_packable_mkldnn_rnn_layer,
    )
    def mkldnn_rnn_layer(match, *args, **kwargs):
        def get_item(graph, node, index):
            # Extract the index-th output of a multi-output node.
            return graph.call_function(operator.getitem, (node, index))

        graph = match.graph
        lstm_node = match.output_node()
        input = args[0]
        weight0, weight1 = args[1:3]
        reverse = kwargs.get("reverse")
        packed_lstm_op = aten.mkldnn_rnn_layer.default
        hidden_size = args[9]
        has_biases = args[11]
        batch_first = args[13]
        with graph.inserting_before(lstm_node):
            packed_weight_op = mkldnn._reorder_mkldnn_rnn_layer_weight.default
            packed_weight_inputs = (
                weight0,
                weight1,
                hidden_size,
                reverse,
                has_biases,
                batch_first,
            )
            packed_weight_node = graph.create_node(
                "call_function", packed_weight_op, packed_weight_inputs, {}, "name"
            )
            # The reorder op yields two packed weight tensors; unpack both.
            packed_weight_items = [
                get_item(graph, packed_weight_node, i) for i in range(2)
            ]
            # Rebuild the rnn-layer call with the packed weights substituted
            # for the original weight0/weight1.
            pack_lstm_inputs = (
                args[0],
                *packed_weight_items,
                args[3],
                args[4],
                args[5],
                args[6],
                reverse,
                *args[7:],
            )

            packed_lstm_node = graph.create_node(
                "call_function", packed_lstm_op, args=pack_lstm_inputs
            )
            lstm_node.replace_all_uses_with(packed_lstm_node)
            packed_lstm_node.meta.update(lstm_node.meta)
            graph.erase_node(lstm_node)

    @register_freezing_graph_pattern(
        CallFunction(aten.addmm.default, Arg(), Arg(), Arg()),
        extra_check=_is_packable_linear,
    )
    @register_freezing_graph_pattern(
        CallFunction(aten.mm.default, Arg(), Arg()),
        extra_check=_is_packable_linear,
    )
    def linear(match, *args, **kwargs):
        graph = match.graph
        linear_node = match.output_node()
        # mm(input, weight) vs addmm(bias, input, weight).
        input = args[0] if linear_node.target == aten.mm.default else args[1]
        bias = None if linear_node.target == aten.mm.default else args[0]
        weight = args[1] if linear_node.target == aten.mm.default else args[2]
        with graph.inserting_before(linear_node):
            transpose_weight_node = graph.create_node(
                "call_function", aten.permute.default, (weight, (1, 0))
            )
            weight_dtype = weight.meta.get("val").dtype
            is_bf16_weight = weight_dtype == torch.bfloat16
            batch_size = input.meta.get("val").shape[0]
            if free_symbols(batch_size):
                assert (
                    is_bf16_weight
                ), f"only bf16 weight prepacking supports dynamic shape inputs but got {weight_dtype}"
            # For bfloat16 dynamic shape path, using input size hint to pack weight for a better performance.
            packed_weight_inputs = (
                transpose_weight_node,
                batch_size.node.shape_env.size_hint(batch_size.node.expr)
                if free_symbols(batch_size)
                else batch_size,
            )
            # NOTE(review): the next line overwrites the size-hint tuple built
            # above, making it dead code — for dynamic shapes the SymInt is
            # passed straight through. Confirm whether the overwrite is
            # intentional.
            packed_weight_inputs = (transpose_weight_node, batch_size)
            packed_weight_op = (
                mkldnn._reorder_linear_weight
                if is_bf16_weight
                else torch.ops.mkl._mkl_reorder_linear_weight
            )
            packed_weight_node = graph.create_node(
                "call_function", packed_weight_op, args=packed_weight_inputs
            )

            packed_linear_inputs = (input, packed_weight_node)
            if is_bf16_weight:
                # mkldnn linear takes (bias, attr, scalars, algorithm).
                packed_linear_inputs += (bias, "none", [], "")
                packed_linear_op = mkldnn._linear_pointwise.default
            else:
                # mkl linear additionally needs the original (transposed)
                # weight and batch size.
                packed_linear_inputs += (transpose_weight_node, bias, batch_size)
                packed_linear_op = torch.ops.mkl._mkl_linear
            packed_linear_node = graph.create_node(
                "call_function", packed_linear_op, packed_linear_inputs
            )
            linear_node.replace_all_uses_with(packed_linear_node)
            packed_linear_node.meta.update(linear_node.meta)
            graph.erase_node(linear_node)
|
| 1028 |
+
|
| 1029 |
+
def _eliminate_duplicate_packed_nodes(gm):
    """
    Combine packed weight nodes with the same inputs to reduce memory usage.
    for example:
    class Model(nn.Module):
        def __init__(self):
            super().__init__()
            self.linear = nn.Linear(32, 32, bias=True)

        def forward(self, x):
            return self.linear(self.linear(x))

    the above's packed weight nodes are duplicate if two linear calls have same input size.
    """
    if not (torch.backends.mkldnn.enabled and torch.backends.mkldnn.is_available()):
        return gm

    # All weight-reorder ops introduced by the weight-pack pass.
    packed_weight_ops = [
        torch._C._nn.mkldnn_reorder_conv2d_weight,
        mkldnn._reorder_convolution_transpose_weight,
        mkldnn._reorder_linear_weight,
        mkldnn._reorder_mkldnn_rnn_layer_weight,
    ]
    if torch._C.has_mkl:
        packed_weight_ops.append(torch.ops.mkl._mkl_reorder_linear_weight)

    for node in gm.graph.nodes:
        if node.target in packed_weight_ops and len(node.args[0].users) > 1:
            # Redirect every identical reorder of the same weight to this
            # node and erase the duplicate.
            for user_node in list(node.args[0].users.keys()):
                if (
                    user_node.target == node.target
                    and user_node != node
                    and user_node.args == node.args
                ):
                    user_node.replace_all_uses_with(node)
                    gm.graph.erase_node(user_node)
|
| 1066 |
+
@functools.lru_cache(None)
def _mkldnn_fusion_init():
    # Run once (lru_cache): register all mkldnn fusion lowerings when
    # mkldnn is built in and usable on this machine.
    if torch.backends.mkldnn.enabled and torch.backends.mkldnn.is_available():
        _register_unary_fusion()
        _register_inplace_fusion()
        _register_binary_unary_fusion()
        _register_binary_fusion()
        _register_quantization_lowerings()
|
| 1074 |
+
|
| 1075 |
+
@functools.lru_cache(None)
def _mkldnn_weight_pack_init():
    # Run once (lru_cache): register the freezing-time weight-prepacking
    # passes when mkldnn is built in and usable on this machine.
    if torch.backends.mkldnn.enabled and torch.backends.mkldnn.is_available():
        _register_weight_pack_pass()
        _recover_linear()
        _register_quantization_weight_pack_pass()
|
llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/pad_mm.py
ADDED
|
@@ -0,0 +1,445 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import functools
|
| 2 |
+
from itertools import chain
|
| 3 |
+
from typing import Optional
|
| 4 |
+
|
| 5 |
+
import torch
|
| 6 |
+
from torch import Tensor
|
| 7 |
+
from torch._inductor import utils
|
| 8 |
+
from torch.utils._mode_utils import no_dispatch
|
| 9 |
+
|
| 10 |
+
from ..pattern_matcher import inference_graph, register_replacement, training_graph
|
| 11 |
+
|
| 12 |
+
aten = torch.ops.aten
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def fetch_fake_tensors(match, kwarg_names):
    """Return the FakeTensors recorded for the given kwargs of a ``Match``.

    Each pattern-matcher kwarg is an fx node whose ``meta["val"]`` holds the
    fake tensor captured during tracing; results follow ``kwarg_names`` order.
    """
    return [match.kwargs[name].meta["val"] for name in kwarg_names]
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def unwrap_fake_args(*arg_names):
    """Decorator factory adapting ``func(*tensors)`` to ``wrapper(match)``.

    The produced wrapper extracts the fake tensors stored for ``arg_names``
    from the match and forwards them positionally to the wrapped function.
    """

    def decorator(func):
        def wrapper(match):
            return func(*fetch_fake_tensors(match, arg_names))

        return wrapper

    return decorator
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def get_alignment_size(x):
    """Return the matmul alignment (in elements) wanted for ``x``'s dtype.

    16-bit float types prefer multiples of 8, 32-bit floats multiples of 4;
    any other dtype returns 0, meaning "do not pad".
    """
    if x.dtype in (torch.float16, torch.half, torch.bfloat16):
        return 8
    if x.dtype in (torch.float32, torch.float):
        return 4
    return 0
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def check_device(a: Tensor, b: Tensor):
    """True iff both operands live on a CUDA device."""
    return all(t.is_cuda for t in (a, b))
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
def check_dtype(a: Tensor, b: Tensor):
    """True iff both operands have floating-point dtypes."""
    return all(t.is_floating_point() for t in (a, b))
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
def is_symbolic(a: Optional[Tensor]):
    """True if ``a`` exists and any of its sizes or strides is a SymInt."""
    if a is None:
        return False
    return any(
        isinstance(dim, torch.SymInt) for dim in chain(a.size(), a.stride())
    )
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
def any_is_symbolic(*args):
    """True if any argument is a tensor with symbolic sizes/strides."""
    for a in args:
        if is_symbolic(a):
            return True
    return False
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
def should_pad_common(mat1, mat2, input=None):
    # Cheap static gate shared by all should_pad_* checks: shape padding is
    # enabled in config, both matrices are floating point on CUDA, and no
    # tensor involved (including the optional addmm bias) has symbolic
    # (dynamic) sizes or strides.
    return (
        torch._inductor.config.shape_padding
        and check_device(mat1, mat2)
        and check_dtype(mat1, mat2)
        and not any_is_symbolic(mat1, mat2, input)
    )
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
def get_padded_length(x, alignment_size):
    """Number of elements needed to round ``x`` up to ``alignment_size``.

    Returns 0 when no padding is needed: either ``x`` is already a multiple
    of ``alignment_size``, or alignment is 0 (dtype opted out of padding).
    """
    if alignment_size == 0:
        return 0
    remainder = x % alignment_size
    if remainder == 0:
        return 0
    return alignment_size - remainder
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
def pad_dim(x, padded_length, dim):
    """Append ``padded_length`` zeros to ``x`` along ``dim`` (no-op for 0)."""
    if padded_length == 0:
        return x
    pad_shape = (*x.shape[:dim], padded_length, *x.shape[dim + 1 :])
    return torch.cat([x, x.new_zeros(pad_shape)], dim=dim)
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
def addmm_pattern(input, mat1, mat2, beta, alpha):
    # Pattern function: traced by the pattern matcher to produce the addmm
    # subgraph that the padding replacement is registered against.  The ops
    # here must stay exactly as traced.
    return aten.addmm(input, mat1, mat2, beta=beta, alpha=alpha)
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
def should_pad_addmm(match):
    """extra_check for the addmm pattern: static gate, then benchmark."""
    mat1, mat2, input = fetch_fake_tensors(match, ("mat1", "mat2", "input"))
    if not should_pad_common(mat1, mat2, input):
        return False
    return should_pad_bench(mat1, mat2, torch.ops.aten.addmm, input=input)
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
def addmm_replace(input, mat1, mat2, beta=1.0, alpha=1.0):
    # Replacement graph for the addmm pattern: pad whichever of m/k/n is not
    # aligned for the dtype, otherwise fall through to plain addmm.  This is
    # traced by the pattern matcher, so keep the ops exactly as written.
    m_padded_length = get_padded_length(mat1.shape[0], get_alignment_size(mat1))
    k_padded_length = get_padded_length(mat1.shape[1], get_alignment_size(mat1))
    n_padded_length = get_padded_length(mat2.shape[1], get_alignment_size(mat2))

    if m_padded_length != 0 or k_padded_length != 0 or n_padded_length != 0:
        return pad_addmm(
            input,
            mat1,
            mat2,
            m_padded_length,
            k_padded_length,
            n_padded_length,
            beta,
            alpha,
        )

    return aten.addmm(input, mat1, mat2, beta=beta, alpha=alpha)
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
def pad_addmm(
    input,
    mat1,
    mat2,
    m_padded_length,
    k_padded_length,
    n_padded_length,
    beta=1.0,
    alpha=1.0,
):
    """Compute ``addmm(input, mat1, mat2)`` with one dimension zero-padded.

    Only one of k/n/m is padded per call (k first, then n, then m); the
    recursive trip through ``addmm_replace`` pads any remaining dimensions,
    and the result is sliced back to the original shape afterwards.

    Bug fix: if every padded length is 0, the old code fell through to the
    final ``[:-m_padded_length, :]`` slice with ``m_padded_length == 0``,
    i.e. ``[:-0]``, which yields an EMPTY tensor.  Guard that case and
    dispatch directly to addmm.
    """
    if m_padded_length == k_padded_length == n_padded_length == 0:
        return torch.ops.aten.addmm(input, mat1, mat2, beta=beta, alpha=alpha)

    # addmm decomp with padding will go through pad_addmm multiple times if
    # multiple dimensions need to be padded
    if k_padded_length != 0:
        # padding k pads both operands; appended zeros don't change the sums
        mat1 = pad_dim(mat1, k_padded_length, 1)
        mat2 = pad_dim(mat2, k_padded_length, 0)
    elif n_padded_length != 0:
        mat2 = pad_dim(mat2, n_padded_length, 1)
    elif m_padded_length != 0:
        mat1 = pad_dim(mat1, m_padded_length, 0)

    # the bias must track the output's padded m/n dimension; padding k leaves
    # the output shape unchanged, so the bias is untouched in that case
    if input is not None and k_padded_length == 0:
        if n_padded_length != 0:
            if input.dim() == 2:
                input = pad_dim(input, n_padded_length, 1)
            elif input.dim() == 1:
                input = pad_dim(input, n_padded_length, 0)
        elif m_padded_length != 0 and input.dim() == 2:
            input = pad_dim(input, m_padded_length, 0)

    if k_padded_length != 0:
        # k padding does not change the output shape: no slicing needed
        return addmm_replace(input, mat1, mat2, beta=beta, alpha=alpha)
    elif n_padded_length != 0:
        return addmm_replace(input, mat1, mat2, beta=beta, alpha=alpha)[
            :, :-n_padded_length
        ]
    else:
        return addmm_replace(input, mat1, mat2, beta=beta, alpha=alpha)[
            :-m_padded_length, :
        ]
|
| 149 |
+
|
| 150 |
+
|
| 151 |
+
def is_mm_compute_bound(M, K, N, dtype):
    """Heuristic: is an (M,K) x (K,N) matmul compute-bound on this device?

    Compares arithmetic intensity (flops per element moved) against an
    estimate of the machine balance; only compute-bound matmuls are worth
    the extra memory traffic of shape padding.
    """
    denominator = M * K + N * K + M * N
    if denominator == 0:
        return False
    arithmetic_intensity = (M * N * K) / denominator

    # Querying device tflops / DRAM bandwidth fails on some backends (AMD);
    # assume compute-bound in that case.
    try:
        machine_balance = (
            1000 * utils.get_device_tflops(dtype)
        ) / utils.get_gpu_dram_gbps()
    except Exception:
        return True

    # dram_gbps may underestimate achievable bandwidth because of caches.
    # Estimating the balance too low misses speedups; too high wastes
    # compilation time.  As a reference, Triton's mm model assumes 80% of
    # reads hit a cache that is 4x faster than DRAM.  Halving the balance is
    # a conservative middle ground (TODO: finetune this coefficient).
    machine_balance = machine_balance * 0.5

    return arithmetic_intensity > machine_balance
|
| 173 |
+
|
| 174 |
+
|
| 175 |
+
@functools.lru_cache(None)
def get_pad_cache():
    # Process-wide singleton (via lru_cache) of the local on-disk cache used
    # to memoize should-pad benchmark verdicts across compilations.
    return torch._inductor.codecache.LocalCache()
|
| 178 |
+
|
| 179 |
+
|
| 180 |
+
def get_cached_should_pad(key):
    # Return the cached benchmark verdict for ``key``, or None on a miss.
    return get_pad_cache().lookup(key)
|
| 182 |
+
|
| 183 |
+
|
| 184 |
+
def set_cached_should_pad(key, value):
    # Persist a benchmark verdict so future compilations skip re-benchmarking.
    return get_pad_cache().set_value(key, value=value)
|
| 186 |
+
|
| 187 |
+
|
| 188 |
+
def should_pad_bench_key(mat1, mat2, op, input=None):
    """Build a stable string cache key for a should-pad benchmark result.

    The key captures everything the measurement depends on: shapes, strides
    and dtypes of both operands (and the optional bias), the op itself, and
    -- for fp32 inputs -- whether TF32 matmuls are currently enabled.
    """

    def tensor_key(t):
        return (t.shape, t.stride(), t.dtype)

    if mat1.dtype == torch.float32:
        tf32_key = torch.backends.cuda.matmul.allow_tf32
    else:
        tf32_key = None

    return str(
        (
            tensor_key(mat1),
            tensor_key(mat2),
            op,
            input if input is None else tensor_key(input),
            tf32_key,
        )
    )
|
| 204 |
+
|
| 205 |
+
|
| 206 |
+
def should_pad_bench(mat1, mat2, op, input=None):
    """Benchmark-driven decision: does padding this mm/addmm/bmm pay off?

    Runs the original op and its padded counterpart on freshly materialized
    random tensors and pads only when padding is >1.1x faster.  Verdicts are
    cached on disk keyed by shapes/strides/dtypes (see should_pad_bench_key).

    Bug fix: the bmm branch previously read ``k`` from ``mat2.shape[2]``,
    which is the n dimension of the output; the reduction dimension of
    ``bmm([b, m, k], [b, k, n])`` is ``mat1.shape[2]``.
    """
    if not utils.has_triton():
        return False

    do_bench = functools.partial(
        utils.do_bench,
        warmup=5,
    )

    with no_dispatch():
        if op is torch.ops.aten.mm or op is torch.ops.aten.addmm:
            m = mat1.shape[0]
            k = mat1.shape[1]
            n = mat2.shape[1]

            m_padded_length = get_padded_length(m, get_alignment_size(mat1))
            k_padded_length = get_padded_length(k, get_alignment_size(mat1))
            n_padded_length = get_padded_length(n, get_alignment_size(mat2))
        elif op is torch.ops.aten.bmm:
            m = mat1.shape[1]
            # was mat2.shape[2] (the n dim); the reduction dim lives on mat1
            k = mat1.shape[2]
            n = mat2.shape[2]

            m_padded_length = get_padded_length(m, get_alignment_size(mat1))
            k_padded_length = get_padded_length(k, get_alignment_size(mat1))
            n_padded_length = get_padded_length(n, get_alignment_size(mat2))
        else:
            return False

        if m_padded_length == k_padded_length == n_padded_length == 0:
            return False

        if not is_mm_compute_bound(m, k, n, mat1.dtype):
            return False

        # We don't want to look up the cache for cases that are trivially false
        # since it does file io
        key = should_pad_bench_key(mat1, mat2, op, input)

        cached_pad = get_cached_should_pad(key)
        if cached_pad is not None:
            return cached_pad

        # materialize real tensors (inputs are fake tensors from tracing)
        mat1 = torch.randn_like(mat1)
        mat2 = torch.randn_like(mat2)
        if op is torch.ops.aten.bmm or op is torch.ops.aten.mm:
            ori_time = do_bench(
                lambda: op(mat1, mat2),
            )
        else:
            if input is not None:
                input = torch.randn_like(input)
            ori_time = do_bench(
                lambda: op(input, mat1, mat2),
            )

        mat1_pad = torch.randn_like(mat1)
        mat2_pad = torch.randn_like(mat2)

        if op is torch.ops.aten.addmm:
            input_pad = None
            if input is not None and input.is_cuda:
                input_pad = torch.randn_like(input)
            pad_time = do_bench(
                lambda: pad_addmm(
                    input_pad,
                    mat1_pad,
                    mat2_pad,
                    m_padded_length,
                    k_padded_length,
                    n_padded_length,
                ),
            )
        elif op is torch.ops.aten.mm:
            pad_time = do_bench(
                lambda: pad_mm(
                    mat1_pad,
                    mat2_pad,
                    m_padded_length,
                    k_padded_length,
                    n_padded_length,
                ),
            )
        else:
            pad_time = do_bench(
                lambda: pad_bmm(
                    mat1_pad,
                    mat2_pad,
                    m_padded_length,
                    k_padded_length,
                    n_padded_length,
                ),
            )

        # Shape padding introduces additional memory ops. Based on microbenchmarks, 1.1x represents a reasonable
        # tradeoff between performance improvement from shape padding and overhead from additional memory ops
        # TODO: Build a learned model which would be better than this heuristic
        should_pad = ori_time > pad_time * 1.1
        set_cached_should_pad(key, should_pad)

        return should_pad
|
| 307 |
+
|
| 308 |
+
|
| 309 |
+
def mm_pattern(mat1, mat2):
    # Pattern function: traced by the pattern matcher; keep ops exact.
    return aten.mm(mat1, mat2)
|
| 311 |
+
|
| 312 |
+
|
| 313 |
+
def should_pad_mm(match):
    """extra_check for the mm pattern: static gate, then benchmark."""
    mat1, mat2 = fetch_fake_tensors(match, ("mat1", "mat2"))
    if not should_pad_common(mat1, mat2):
        return False
    return should_pad_bench(mat1, mat2, torch.ops.aten.mm)
|
| 318 |
+
|
| 319 |
+
|
| 320 |
+
def mm_replace(mat1, mat2):
    # Replacement graph for the mm pattern (traced; keep ops exact).
    # NOTE(review): unlike addmm_replace/bmm_replace this calls pad_mm
    # unconditionally -- we only reach here after should_pad_mm verified
    # that at least one padded length is nonzero.
    m_padded_length = get_padded_length(mat1.shape[0], get_alignment_size(mat1))
    k_padded_length = get_padded_length(mat1.shape[1], get_alignment_size(mat1))
    n_padded_length = get_padded_length(mat2.shape[1], get_alignment_size(mat2))

    return pad_mm(mat1, mat2, m_padded_length, k_padded_length, n_padded_length)
|
| 326 |
+
|
| 327 |
+
|
| 328 |
+
def pad_mm(mat1, mat2, m_padded_length, k_padded_length, n_padded_length):
    """Compute ``mm(mat1, mat2)`` with one dimension zero-padded, then sliced.

    Pads at most one dimension per call (k first, then n, then m); recursion
    through ``mm_replace`` handles any remaining dimensions.

    Bug fix: an all-zero padding request previously fell into the final
    ``[:-m_padded_length, :]`` slice with ``m_padded_length == 0`` -- i.e.
    ``[:-0]`` -- returning an EMPTY tensor.  Guard that case and dispatch
    straight to mm.
    """
    if m_padded_length == k_padded_length == n_padded_length == 0:
        return torch.ops.aten.mm(mat1, mat2)

    # mm_replace will go through pad_mm multiple times if multiple dimensions
    # need to be padded
    if k_padded_length != 0:
        # zeros appended along k contribute nothing to the dot products, so
        # the output needs no slicing
        mat1 = pad_dim(mat1, k_padded_length, 1)
        mat2 = pad_dim(mat2, k_padded_length, 0)
        return torch.ops.aten.mm(mat1, mat2)
    elif n_padded_length != 0:
        mat2 = pad_dim(mat2, n_padded_length, 1)
        return torch.ops.aten.mm(mat1, mat2)[:, :-n_padded_length]
    else:
        mat1 = pad_dim(mat1, m_padded_length, 0)
        return torch.ops.aten.mm(mat1, mat2)[:-m_padded_length, :]
|
| 340 |
+
|
| 341 |
+
|
| 342 |
+
def bmm_pattern(mat1, mat2):
    # Pattern function: traced by the pattern matcher; keep ops exact.
    return aten.bmm(mat1, mat2)
|
| 344 |
+
|
| 345 |
+
|
| 346 |
+
def should_pad_bmm(match):
    """extra_check for the bmm pattern: static gate, then benchmark."""
    mat1, mat2 = fetch_fake_tensors(match, ("mat1", "mat2"))
    if not should_pad_common(mat1, mat2):
        return False
    return should_pad_bench(mat1, mat2, torch.ops.aten.bmm)
|
| 351 |
+
|
| 352 |
+
|
| 353 |
+
def bmm_replace(mat1, mat2):
    # Replacement graph for the bmm pattern: pad any misaligned dim of the
    # [b, m, k] x [b, k, n] batch matmul, else plain bmm.  Traced by the
    # pattern matcher; keep ops exact.
    m_padded_length = get_padded_length(mat1.shape[1], get_alignment_size(mat1))
    k_padded_length = get_padded_length(mat1.shape[2], get_alignment_size(mat1))
    n_padded_length = get_padded_length(mat2.shape[2], get_alignment_size(mat2))

    if m_padded_length != 0 or k_padded_length != 0 or n_padded_length != 0:
        return pad_bmm(mat1, mat2, m_padded_length, k_padded_length, n_padded_length)

    return aten.bmm(mat1, mat2)
|
| 362 |
+
|
| 363 |
+
|
| 364 |
+
def pad_bmm(mat1, mat2, m_padded_length, k_padded_length, n_padded_length):
    """Compute ``bmm(mat1, mat2)`` with one dimension zero-padded, then sliced.

    Pads at most one of k/n/m per call; recursion through ``bmm_replace``
    handles any remaining dimensions.

    Bug fix: an all-zero padding request previously fell into the final
    ``[:, :-m_padded_length, :]`` slice with ``m_padded_length == 0``
    (i.e. ``[:-0]``), returning an EMPTY tensor.  Guard that case and
    dispatch straight to bmm.
    """
    if m_padded_length == k_padded_length == n_padded_length == 0:
        return torch.ops.aten.bmm(mat1, mat2)

    # bmm_replace will go through pad_bmm multiple times if multiple
    # dimensions need to be padded
    if k_padded_length != 0:
        # k padding leaves the output shape unchanged: no slicing needed
        mat1 = pad_dim(mat1, k_padded_length, 2)
        mat2 = pad_dim(mat2, k_padded_length, 1)

        return aten.bmm(mat1, mat2)
    elif n_padded_length != 0:
        mat2 = pad_dim(mat2, n_padded_length, 2)
        return aten.bmm(mat1, mat2)[:, :, :-n_padded_length].contiguous()
    else:
        mat1 = pad_dim(mat1, m_padded_length, 1)
        return aten.bmm(mat1, mat2)[:, :-m_padded_length, :].contiguous()
|
| 377 |
+
|
| 378 |
+
|
| 379 |
+
@functools.lru_cache(None)
def _pad_mm_init():
    # One-time registration of the mm/bmm/addmm padding replacements into the
    # joint-graph pattern set.  The lru_cache(None) makes this idempotent.
    from .joint_graph import patterns

    if torch.cuda.is_available():
        # workaround https://github.com/pytorch/pytorch/issues/97894
        device = "cuda"
    else:
        device = "cpu"

    # sizes/values dont actually matter for initial trace
    # once we get a possible match we re-trace with the actual values and verify the match still holds

    dim2a = functools.partial(torch.empty, (4, 4), device=device, requires_grad=True)
    dim2b = functools.partial(torch.empty, (4, 4), device=device, requires_grad=True)

    dim3a = functools.partial(torch.empty, (4, 4, 4), device=device, requires_grad=True)
    dim3b = functools.partial(torch.empty, (4, 4, 4), device=device, requires_grad=True)

    dim1a = functools.partial(torch.empty, (4), device=device, requires_grad=True)

    # workaround https://github.com/pytorch/pytorch/issues/97894
    # 0.113377 is a "magic" value that lets us recover the lost input arg relationship
    rep = {"beta": 0.213377, "alpha": 0.113377}

    # Each entry: (pattern fn, replacement fn, example args for the initial
    # trace, scalar workaround mapping, extra_check gate).
    for pattern, replacement, args, workaround, extra_check in [
        (
            mm_pattern,
            mm_replace,
            [dim2a(), dim2b()],
            {},
            should_pad_mm,
        ),
        (
            bmm_pattern,
            bmm_replace,
            [dim3a(), dim3b()],
            {},
            should_pad_bmm,
        ),
        (
            addmm_pattern,
            addmm_replace,
            [dim1a(), dim2a(), dim2b()],
            rep,
            should_pad_addmm,
        ),
    ]:
        args = [*args, *workaround.values()]
        # register both the training-graph and inference-graph variants
        register_replacement(
            pattern,
            replacement,
            args,
            training_graph,
            patterns,
            extra_check=extra_check,
            scalar_workaround=workaround,
        )
        register_replacement(
            pattern,
            replacement,
            args,
            inference_graph,
            patterns,
            extra_check=extra_check,
            scalar_workaround=workaround,
        )
|
llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/post_grad.py
ADDED
|
@@ -0,0 +1,602 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import functools
|
| 2 |
+
import itertools
|
| 3 |
+
import logging
|
| 4 |
+
import operator
|
| 5 |
+
from typing import List, Optional, Union
|
| 6 |
+
|
| 7 |
+
from sympy import Expr
|
| 8 |
+
|
| 9 |
+
import torch
|
| 10 |
+
import torch._inductor as inductor
|
| 11 |
+
|
| 12 |
+
from .. import config, ir, pattern_matcher
|
| 13 |
+
|
| 14 |
+
from ..lowering import lowerings as L
|
| 15 |
+
from ..pattern_matcher import (
|
| 16 |
+
_return_true,
|
| 17 |
+
Arg,
|
| 18 |
+
CallFunction,
|
| 19 |
+
filter_nodes,
|
| 20 |
+
get_arg_value,
|
| 21 |
+
Ignored,
|
| 22 |
+
init_once_fakemode,
|
| 23 |
+
KeywordArg,
|
| 24 |
+
ListOf,
|
| 25 |
+
Match,
|
| 26 |
+
MULTIPLE,
|
| 27 |
+
PatternMatcherPass,
|
| 28 |
+
register_graph_pattern,
|
| 29 |
+
remove_extra_clones,
|
| 30 |
+
stable_topological_sort,
|
| 31 |
+
)
|
| 32 |
+
from ..virtualized import V
|
| 33 |
+
from .group_batch_fusion import group_batch_fusion_post_grad_passes
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
log = logging.getLogger(__name__)
# Shorthand handles for the ATen and prims operator namespaces.
aten = torch.ops.aten
prims = torch.ops.prims

# First pass_patterns[0] are applied, then [1], then [2]
pass_patterns = [
    PatternMatcherPass(),
    PatternMatcherPass(),
    PatternMatcherPass(),
]
# patterns applied only in inference
inference_patterns = PatternMatcherPass()
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
def post_grad_passes(gm: torch.fx.GraphModule, is_inference: bool):
    """
    Passes that run on after grad. This is called once on the forwards
    graph and once on the backwards graph.

    The IR here has been normalized and functionalized.
    """
    if config.dce:
        # has some issues with mutation in inference mode
        gm.graph.eliminate_dead_code()

    if is_inference and config.reordering:
        reorder_for_locality(gm.graph)

    if config.pattern_matcher:
        lazy_init()

        group_batch_fusion_post_grad_passes(gm.graph)
        remove_extra_clones(gm.graph)

        # pattern passes run in priority order (pass_patterns[0] first)
        for patterns in pass_patterns:
            patterns.apply(gm.graph)
        if is_inference:
            inference_patterns.apply(gm.graph)

    # passes above may have inserted nodes out of order; restore a valid
    # topological ordering before recompiling and linting
    stable_topological_sort(gm.graph)
    gm.recompile()
    gm.graph.lint()
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
@init_once_fakemode
def lazy_init():
    # Deferred one-time pattern registration (guarded by init_once_fakemode)
    # so importing this module stays cheap.
    if torch._C._has_mkldnn:
        from .mkldnn_fusion import _mkldnn_fusion_init

        _mkldnn_fusion_init()
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
def reorder_for_locality(graph: torch.fx.Graph):
    # Walk the graph backwards and sink each producer to sit directly before
    # its consumer, improving temporal locality of intermediate buffers.
    def visit(other_node):
        # only move a producer once every one of its users has been seen
        # (i.e. all users come after the current node in the final order)
        if (
            other_node.op == "call_function"
            and other_node.target != operator.getitem
            and all((n in seen_nodes) for n in other_node.users)
        ):
            # move node's producers right before it
            node.prepend(other_node)

    seen_nodes = set()

    # only reorder nodes before the first copy_ in the graph.
    # copy_ will appear at the end of functionalized graphs when there is mutation on inputs,
    # and this reordering doesnt work well with mutation
    first_copy = next(
        (
            node
            for node in graph.nodes
            if node.op == "call_function"
            and node.target == torch.ops.aten.copy_.default
        ),
        None,
    )
    past_mutating_epilogue = True if first_copy is None else False

    for node in reversed(graph.nodes):
        seen_nodes.add(node)
        if not past_mutating_epilogue:
            # skip the trailing mutation epilogue entirely
            past_mutating_epilogue = node is first_copy
            continue

        torch.fx.map_arg((node.args, node.kwargs), visit)
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
def register_lowering_pattern(pattern, extra_check=_return_true, pass_number=1):
    """Register an aten -> inductor IR replacement pattern.

    ``pass_number`` selects which of the three ordered ``pass_patterns``
    passes the pattern participates in (default: the middle one).
    """
    target_pass = pass_patterns[pass_number]
    return pattern_matcher.register_lowering_pattern(
        pattern, extra_check, pass_dict=target_pass
    )
|
| 130 |
+
|
| 131 |
+
|
| 132 |
+
################################################################################
|
| 133 |
+
# Actual patterns below this point.
|
| 134 |
+
# Priority of patterns is:
|
| 135 |
+
# - later output nodes first
|
| 136 |
+
# - order patterns are defined in
|
| 137 |
+
################################################################################
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
@register_lowering_pattern(
    CallFunction(
        aten.add,
        CallFunction(aten.mm, Arg(), Arg()),
        CallFunction(aten.mm, Arg(), Arg()),
    )
)
def mm_plus_mm(match: Match, mat1, mat2, mat3, mat4):
    # Fuse mm(mat1, mat2) + mm(mat3, mat4) into one autotuned kernel.
    return inductor.kernel.mm_plus_mm.tuned_mm_plus_mm(mat1, mat2, mat3, mat4)  # type: ignore[attr-defined]
|
| 149 |
+
|
| 150 |
+
|
| 151 |
+
def cuda_and_enabled_mixed_mm(match):
    """extra_check: mixed mm is enabled in config and mat1 is on CUDA."""
    if not (config.use_mixed_mm or config.force_mixed_mm):
        return False
    fake_mat1 = match.kwargs["mat1"].meta.get("val")
    return getattr(fake_mat1, "is_cuda", False)
|
| 155 |
+
|
| 156 |
+
|
| 157 |
+
def cuda_and_enabled_mixed_mm_and_not_int8(match):
    """extra_check: mixed mm enabled, mat1 on CUDA, and mat2 is not int8.

    The int8 exclusion exists because bitshift numerics in triton and
    pytorch don't match for torch.int8.

    Cleanup: the original additionally repeated the mat1 ``is_cuda`` test
    that ``cuda_and_enabled_mixed_mm`` already performs; the redundant
    duplicate check is dropped (behavior unchanged).
    """
    return (
        cuda_and_enabled_mixed_mm(match)
        and getattr(match.kwargs["mat2"].meta.get("val"), "dtype", torch.int8)
        != torch.int8
    )
|
| 164 |
+
|
| 165 |
+
|
| 166 |
+
"""
|
| 167 |
+
this is intended to be used to unpack a [K,N] int4 tensor from a [K/2, N] uint4x2 tensor
|
| 168 |
+
(where the int4 and uint4x2 are represented with int8 and uint8 respectively)
|
| 169 |
+
where every other row of the int4 is packed with the row above it as:
|
| 170 |
+
uint4x2[k,n] = (8+int4[2*k,n])+(8+int4[2*k+1,n])<<4
|
| 171 |
+
|
| 172 |
+
unpack formulas:
|
| 173 |
+
int4[2*k,n]=(uint4x2[k,n] & 0xF) - 8
|
| 174 |
+
int4[2*k+1,n]=(uint4x2[k,n] >> 4) - 8
|
| 175 |
+
|
| 176 |
+
thus matching on unpack formula:
|
| 177 |
+
torch.mm(mat1, torch.cat((mat2 & 0xF, mat2>>4),1).reshape(mat2_mm_shape).to(mat2_dtype).sub(8))
|
| 178 |
+
|
| 179 |
+
note: although the unpack formula in pytorch and the triton kernel is designed for a uint8 mat2, the behavior
|
| 180 |
+
of the kernel matches the pytorch formula for all dtypes except torch.int8
|
| 181 |
+
where the bitwise numerics in triton do not match those in pytorch.
|
| 182 |
+
"""
|
| 183 |
+
|
| 184 |
+
|
| 185 |
+
@register_lowering_pattern(
    # Matches the int4 unpack-then-mm subgraph described in the module-level
    # string above:
    #   mm(mat1, cat((mat2 & 0xF, mat2 >> 4), 1).reshape(...).to(dtype) - 8)
    CallFunction(
        aten.mm.default,
        KeywordArg("mat1"),
        CallFunction(
            aten.sub.Tensor,
            CallFunction(
                prims.convert_element_type.default,
                CallFunction(
                    aten.reshape.default,
                    CallFunction(
                        aten.cat.default,
                        ListOf(
                            CallFunction(
                                aten.bitwise_and.Scalar,
                                KeywordArg("mat2"),
                                0xF,
                            ),
                            CallFunction(
                                aten.__rshift__.Scalar,
                                KeywordArg("mat2"),
                                4,
                            ),
                        ),
                        1,
                    ),
                    KeywordArg("mat2_mm_shape"),
                ),
                KeywordArg("mat2_dtype"),
            ),
            8,
        ),
    ),
    extra_check=cuda_and_enabled_mixed_mm_and_not_int8,
)
def uint4x2_mixed_mm(match: Match, mat1, mat2, mat2_mm_shape, mat2_dtype):
    # Replace the matched unpack + mm with a fused kernel that unpacks the
    # packed uint4x2 weights inline instead of materializing them.
    return inductor.kernel.unpack_mixed_mm.tuned_uint4x2_mixed_mm(  # type: ignore[attr-defined]
        mat1, mat2, mat2_mm_shape, mat2_dtype
    )
|
| 224 |
+
|
| 225 |
+
|
| 226 |
+
"""
|
| 227 |
+
torch.mm(mat1, mat2.to(mat2_dtype))
|
| 228 |
+
"""
|
| 229 |
+
|
| 230 |
+
|
| 231 |
+
@register_lowering_pattern(
    # Matches mm(mat1, mat2.to(mat2_dtype)) -- see the string above.
    CallFunction(
        aten.mm,
        KeywordArg("mat1"),
        CallFunction(
            prims.convert_element_type.default,
            KeywordArg("mat2"),
            KeywordArg("mat2_dtype"),
        ),
    ),
    extra_check=cuda_and_enabled_mixed_mm,
)
def mixed_mm(match: Match, mat1, mat2, mat2_dtype):
    # Lower to a fused mixed-dtype matmul, avoiding materializing the
    # upcast copy of mat2.
    return inductor.kernel.mm.tuned_mixed_mm(mat1, mat2, mat2_dtype)  # type: ignore[attr-defined]
|
| 245 |
+
|
| 246 |
+
|
| 247 |
+
@register_graph_pattern(
    # Matches cumsum(full([size0, size1], 1), dim=1).
    CallFunction(
        aten.cumsum.default,
        CallFunction(
            torch.ops.aten.full.default,
            [Arg(), Arg()],
            1,
            dtype=KeywordArg("dtype"),
            layout=Ignored(),
            device=KeywordArg("device"),
            pin_memory=False,
            _users=MULTIPLE,
        ),
        1,
        _users=MULTIPLE,
    ),
    pass_dict=pass_patterns[1],
)
def pointless_cumsum_replacement(match: Match, size0, size1, device, dtype):
    """Based on a pattern in OPTForCausalLM"""

    # cumsum of all-ones along dim 1 is just 1..size1 broadcast over rows
    def repl(size0, size1):
        return torch.arange(1, size1 + 1, device=device, dtype=dtype).expand(
            size0, size1
        )

    # only replace the output node, not all nodes
    match.nodes = [match.output_node()]
    with V.fake_mode:
        match.replace_by_example(repl, [size0, size1])
|
| 277 |
+
|
| 278 |
+
|
| 279 |
+
def shape_of_mm(a, b):
    """Output size of ``mm(a, b)``: [rows of ``a``, cols of ``b``]."""
    rows, _ = a.get_size()
    _, cols = b.get_size()
    return [rows, cols]
|
| 283 |
+
|
| 284 |
+
|
| 285 |
+
@register_lowering_pattern(
    CallFunction(aten.cat, ListOf(CallFunction(aten.mm, Arg(), Arg())), Arg()),
)
def cat_mm(match, inputs, dim):
    # cat of several mm results: let each mm write directly into its slice
    # of the concatenated output buffer (see cat_tuned_op).
    return cat_tuned_op(match, inputs, dim, op=L[aten.mm], shape_of=shape_of_mm)
|
| 290 |
+
|
| 291 |
+
|
| 292 |
+
@register_lowering_pattern(
    CallFunction(
        aten.cat, ListOf(CallFunction(aten.addmm, Arg(), Arg(), Arg())), Arg()
    ),
)
def cat_addmm(match, inputs, dim):
    # cat of several addmm results: same trick as cat_mm; the output shape
    # comes from the two matrix operands (the bias broadcasts).
    def shape_of(bias, a, b):
        m, _ = a.get_size()
        _, n = b.get_size()
        return [m, n]

    return cat_tuned_op(match, inputs, dim, op=L[aten.addmm], shape_of=shape_of)
|
| 304 |
+
|
| 305 |
+
|
| 306 |
+
def cat_tuned_op(match, inputs, dim, *, op, shape_of):
    """
    Memory planning to remove cat. We can't use the stock memory
    planner since autotuning matmuls needs to know the output layout.
    """
    # degenerate cat of one input: just run the op
    if len(inputs) == 1:
        return op(*inputs[0])

    # TODO(jansel): rewrite this as a bmm?
    if dim < 0:
        dim += len(shape_of(*inputs[0]))
    assert dim in (0, 1)
    notdim = 1 - dim

    new_size: Optional[Union[List[Expr], List[int]]] = None
    offsets_start = []
    offsets_end = []

    # compute output sizes
    for i in range(len(inputs)):
        shape = shape_of(*inputs[i])
        if new_size is None:
            new_size = shape
        else:
            # all inputs must agree on the non-concatenated dimension
            new_size[notdim] = V.graph.sizevars.guard_equals(
                shape[notdim], new_size[notdim]
            )
            new_size[dim] += shape[dim]
        # per-input slice bounds within the concatenated output
        offsets_start.append(new_size[dim] - shape[dim])
        offsets_end.append(new_size[dim])

    assert new_size is not None
    dtype = functools.reduce(
        torch.promote_types, [x.get_dtype() for x in itertools.chain(*inputs)]
    )
    device = inputs[0][0].get_device()
    kernel = ir.ConcatKernel(
        name=None,
        layout=ir.FixedLayout(device, dtype, new_size),
        inputs=[],
    )
    kernel_tensor = ir.TensorBox.create(kernel)

    for i in range(len(inputs)):
        # have each op write straight into its slice of the cat output
        dst = ir.SliceView.create(kernel_tensor, dim, offsets_start[i], offsets_end[i])
        src = op(*inputs[i], layout=dst.get_layout()).data.data
        assert isinstance(src, (ir.ExternKernelOut, ir.TemplateBuffer))
        src.layout = ir.AliasedLayout(dst)
        kernel.inputs.append(src)

    kernel.name = V.graph.register_buffer(kernel)
    kernel.inputs = ir.ConcatKernel.unwrap_storage(kernel.inputs)
    return kernel_tensor
|
| 359 |
+
|
| 360 |
+
|
| 361 |
+
# Shared subpattern: a cat along dim=1 that is consumed twice within the
# larger pattern it appears in.
_cat_1 = CallFunction(aten.cat, Arg(), 1, _users=2)
|
| 362 |
+
|
| 363 |
+
|
| 364 |
+
@register_lowering_pattern(
    CallFunction(
        aten.cat,
        [
            _cat_1,
            CallFunction(
                aten.slice,
                # 9223372036854775807 is INT64_MAX, i.e. "slice to the end".
                CallFunction(aten.slice, _cat_1, 0, 0, 9223372036854775807),
                1,
                0,
                KeywordArg("size"),
            ),
        ],
        1,
    )
)
def cat_slice_cat(match, cat_input, size, dim=1):
    """
    This is an example of a more complex pattern where cat_1 is used
    multiple times inside the pattern. We fold 2 calls to cat into one.

    Matches:
        cat_1: f32[1024, 4077] = torch.ops.aten.cat.default([add_26, primals_217], 1)
        slice_1: f32[1024, 4077] = torch.ops.aten.slice.Tensor(cat_1, 0, 0, 9223372036854775807)
        slice_2: f32[1024, 19] = torch.ops.aten.slice.Tensor(slice_1, 1, 0, 19)
        cat_2: f32[1024, 4096] = torch.ops.aten.cat.default([cat_1, slice_2], 1)


    Rewrite to:
        slice_2 = torch.ops.aten.slice.Tensor(add_26, 1, 0, 19)
        cat_2 = torch.ops.aten.cat.default([add_26, primals_217, slice2], 1)
    """
    first, *rest = cat_input
    # Optimization is optional, because we can just not fold the cat
    # size should be within first.get_size()[dim] such that the optimization is valid.
    # For negative `end`, we currently fallback to not optimizing.
    if size >= 0 and V.graph.sizevars.statically_known_leq(size, first.get_size()[dim]):
        # fold 2 cats into 1 cat: the sliced prefix of the inner cat is just
        # a slice of its first input, so re-slice `first` directly.
        return L[aten.cat](
            [
                first,
                *rest,
                L[aten.slice](first, dim, 0, size),
            ],
            dim,
        )
    else:
        # don't expect to hit this case, just fall back to materializing the
        # inner cat and slicing it, preserving the original semantics.
        tmp = L[aten.cat](cat_input, dim)
        return L[aten.cat](
            [
                tmp,
                L[aten.slice](tmp, dim, 0, size),
            ],
            dim,
        )
|
| 420 |
+
|
| 421 |
+
|
| 422 |
+
# Matches add(mm(a, b), inp) with `inp` on either side of the add.
@register_lowering_pattern(
    CallFunction(
        aten.add,
        CallFunction(aten.mm, Arg(), Arg()),
        KeywordArg("inp"),
    ),
    pass_number=2,
)
@register_lowering_pattern(
    CallFunction(
        aten.add,
        KeywordArg("inp"),
        CallFunction(aten.mm, Arg(), Arg()),
    ),
    pass_number=2,
)
def addmm(match, mat1, mat2, inp):
    # Fuse add(mm(mat1, mat2), inp) into a single addmm when `inp` is a
    # tensor of rank <= 2 whose dims each equal 1 or the matching mm dim.
    if isinstance(inp, ir.TensorBox):
        inp_shape = inp.get_size()
        matched = len(inp_shape) <= 2
        mm_shape = shape_of_mm(mat1, mat2)
        # Every paired dim must be broadcast-compatible (1 or equal).
        for i, m in zip(inp_shape, mm_shape):
            matched &= i == 1 or i == m
    else:  # inp is a Number — addmm needs a tensor bias, so don't fuse
        matched = False
    if matched:
        return L[aten.addmm](inp, mat1, mat2)
    else:
        # Not fusible: lower as the original add(mm(...), inp).
        return L[aten.add](inp, L[aten.mm](mat1, mat2))
|
| 451 |
+
|
| 452 |
+
|
| 453 |
+
def is_valid_splitwithsizes_cat(match):
    """Extra check for the cat(split_with_sizes(...)) pattern.

    Returns True only when the cat exactly reassembles all pieces of a single
    split, in order and along the same dim, so the pair is an identity.
    """
    split_nodes = filter_nodes(match.nodes, aten.split_with_sizes)
    cat_nodes = filter_nodes(match.nodes, aten.cat)
    get_item_nodes = filter_nodes(match.nodes, operator.getitem)
    if len(split_nodes) != 1 or len(cat_nodes) != 1:
        return False
    split_node, cat_node = split_nodes[0], cat_nodes[0]
    # The dim of split and cat should match for passthrough
    if get_arg_value(split_node, 2, "dim") != get_arg_value(cat_node, 1, "dim"):
        return False
    # Indices extracted from the split result (getitem's second arg).
    get_item_args = {
        get_arg_value(get_item_node, 1) for get_item_node in get_item_nodes
    }
    assert None not in get_item_args
    split_sizes = get_arg_value(split_node, 1, "split_sizes")
    # All parts of split should be included in the cat
    if get_item_args != set(range(len(split_sizes))):
        return False
    # The order of get_item_args should be the same as the order the cat uses.
    # For example, if the split_node is split_with_sizes(input, [2, 2, 3], 1),
    # the cat node should be cat([get_item(0), get_item(1), get_item(2)], 1).
    cat_items_args_order = [
        get_arg_value(item_node, 1) for item_node in get_arg_value(cat_node, 0)
    ]
    if cat_items_args_order != list(range(len(split_sizes))):
        return False

    return True
|
| 481 |
+
|
| 482 |
+
|
| 483 |
+
@register_lowering_pattern(
    CallFunction(
        aten.cat,
        ListOf(
            CallFunction(
                operator.getitem,
                CallFunction(
                    aten.split_with_sizes,
                    KeywordArg("input_"),
                    Ignored(),
                    Ignored(),
                    _users=MULTIPLE,
                ),
                Ignored(),
            ),
        ),
        Ignored(),
    ),
    pass_number=2,
    extra_check=is_valid_splitwithsizes_cat,
)
def splitwithsizes_cat_replace(match, input_):
    # cat over all getitems of a split_with_sizes is the identity once
    # extra_check has verified dim/coverage/order — just return the input.
    return input_
|
| 506 |
+
|
| 507 |
+
|
| 508 |
+
def is_valid_cat_splitwithsizes(match):
    """Extra check for the split_with_sizes(cat(...)) pattern.

    Returns True only when the split undoes the cat exactly: same dim, one
    split size per cat input, each size equal to that input's size along dim.
    """
    cat_nodes = filter_nodes(match.nodes, aten.cat)
    split_nodes = filter_nodes(match.nodes, aten.split_with_sizes)
    if len(split_nodes) != 1 or len(cat_nodes) != 1:
        return False
    split_node, cat_node = split_nodes[0], cat_nodes[0]

    # the cat node has other users: can't eliminate
    if len(cat_node.users) > 1:
        return False

    # the dim of the cat and split should match
    dim = get_arg_value(split_node, 2, "dim")
    if dim != get_arg_value(cat_node, 1, "dim"):
        return False

    cat_inputs = list(get_arg_value(cat_node, 0))
    split_sizes = get_arg_value(split_node, 1, "split_sizes")
    # the number of input tensors in cat and the
    # length of the split sizes should match
    if len(cat_inputs) != len(split_sizes):
        return False

    for cat_input, split_size in zip(cat_inputs, split_sizes):
        # each cat input tensor's size along dim
        # should match the corresponding split size
        if "val" not in cat_input.meta:
            # no fake-tensor metadata; cannot verify sizes — bail out
            return False
        cat_input_size = cat_input.meta["val"].size(dim)
        if cat_input_size != split_size:
            return False

    return True
|
| 541 |
+
|
| 542 |
+
|
| 543 |
+
@register_lowering_pattern(
    CallFunction(
        aten.split_with_sizes,
        CallFunction(
            aten.cat,
            KeywordArg("input_"),
            Ignored(),
            _users=MULTIPLE,
        ),
        Ignored(),
        Ignored(),
    ),
    pass_number=2,
    extra_check=is_valid_cat_splitwithsizes,
)
def cat_splitwithsizes_replace(match, input_):
    # split_with_sizes(cat(xs, dim), sizes, dim) where sizes mirror the cat
    # inputs (verified by extra_check) re-materializes xs — return them as-is.
    return input_
|
| 560 |
+
|
| 561 |
+
|
| 562 |
+
def view_to_reshape(gm):
    """
    Replace view ops in the GraphModule to reshape ops.
    """
    # In-place retarget: every aten.view.default node becomes
    # aten.reshape.default; no nodes are added or removed.
    view_target = torch.ops.aten.view.default
    reshape_target = torch.ops.aten.reshape.default
    for node in gm.graph.nodes:
        if node.target == view_target:
            node.target = reshape_target
|
| 569 |
+
|
| 570 |
+
|
| 571 |
+
def is_pointwise_use(use):
    """Return True if this node is (transitively) consumed only pointwise.

    getitem and view ops are treated as transparent: their pointwise-ness is
    decided recursively by their own users. Anything that is not a
    call_function on an OpOverload (or getitem) is not pointwise.
    """
    if use.op != "call_function":
        return False

    target = use.target
    overload = isinstance(target, torch._ops.OpOverload)
    if not overload and target is not operator.getitem:
        return False

    if target is operator.getitem or target.is_view:
        # Transparent node: recurse into its consumers.
        return all(is_pointwise_use(u) for u in use.users)

    return torch.Tag.pointwise in target.tags
|
| 584 |
+
|
| 585 |
+
|
| 586 |
+
@register_graph_pattern(
    CallFunction(aten.addmm, Arg(), Arg(), Arg()),
    pass_dict=pass_patterns[2],
)
def unfuse_bias_add_to_pointwise(match: Match, inp, mat1, mat2):
    # Rewrite addmm(inp, mat1, mat2) as mm + add when every consumer of the
    # result is pointwise — presumably so the bias add can fuse into the
    # pointwise consumers instead of the matmul (TODO confirm motivation).
    # Only applies to CUDA tensors.
    if not inp.meta["val"].is_cuda:
        return

    output = match.output_node()
    if not all(is_pointwise_use(use) for use in output.users):
        return

    # Replacement graph traced by replace_by_example: plain mm followed by add.
    def repl(inp, x1, x2):
        return x1 @ x2 + inp

    with V.fake_mode:
        match.replace_by_example(repl, [inp, mat1, mat2])
|
llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/pre_grad.py
ADDED
|
@@ -0,0 +1,460 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import copy
|
| 2 |
+
import logging
|
| 3 |
+
from typing import List, Optional
|
| 4 |
+
|
| 5 |
+
import torch
|
| 6 |
+
import torch.nn as nn
|
| 7 |
+
from torch._dynamo.utils import detect_fake_mode
|
| 8 |
+
from torch.fx.experimental.optimization import (
|
| 9 |
+
matches_module_pattern,
|
| 10 |
+
replace_node_module,
|
| 11 |
+
)
|
| 12 |
+
from torch.fx.passes.shape_prop import ShapeProp
|
| 13 |
+
from torch.nn import functional as F
|
| 14 |
+
from torch.nn.utils.fusion import fuse_conv_bn_eval, fuse_conv_bn_weights
|
| 15 |
+
|
| 16 |
+
from .. import config
|
| 17 |
+
|
| 18 |
+
from ..fx_utils import matches_module_function_pattern
|
| 19 |
+
from ..pattern_matcher import (
|
| 20 |
+
init_once_fakemode,
|
| 21 |
+
PatternMatcherPass,
|
| 22 |
+
stable_topological_sort,
|
| 23 |
+
)
|
| 24 |
+
from ..utils import is_cpu_device
|
| 25 |
+
from .group_batch_fusion import group_batch_fusion_pre_grad_passes
|
| 26 |
+
|
| 27 |
+
log = logging.getLogger(__name__)

# Pre-grad pattern-matcher passes, applied in this order by pre_grad_passes().
# prevent_match_across_mutations stops a match from spanning a mutated value.
normalization_pass = PatternMatcherPass(prevent_match_across_mutations=True)
merge_splits_pass = PatternMatcherPass(prevent_match_across_mutations=True)
split_cat_pass = PatternMatcherPass(prevent_match_across_mutations=True)
unbind_stack_pass = PatternMatcherPass(prevent_match_across_mutations=True)

pattern_matcher_passes: List[PatternMatcherPass] = [
    normalization_pass,
    merge_splits_pass,
    split_cat_pass,
    unbind_stack_pass,
]
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
@init_once_fakemode
def lazy_init():
    """Register the split/cat patterns into the passes above.

    The imports are done lazily (and once, under a fake mode via the
    decorator) so that merely importing this module stays cheap.
    """
    from . import split_cat  # noqa: F401

    if config.is_fbcode():
        # Internal-only extra patterns.
        from .fb import split_cat as split_cat_fb  # noqa: F401
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
def pre_grad_passes(gm, example_inputs):
    """
    Apply passes on the input FX graph using Torch IR.

    WARNING:
    The IR before grad is not functional or normalized, so it is harder
    to write passes on this IR.  Passes must be safe with respect to
    aliasing and mutation and need to handle all possible arg schemas.

    Consider adding a new pass to post_grad.py or joint_graph.py which
    are after functionalization and normalization.
    """
    if config.pattern_matcher:
        lazy_init()
        # Structural fusions first, then batch fusions, then the registered
        # pattern-matcher passes in their declared order.
        gm = fuse_fx(gm, example_inputs)
        group_batch_fusion_pre_grad_passes(gm.graph)
        for pattern_matcher_pass in pattern_matcher_passes:
            pattern_matcher_pass.apply(gm.graph)

    # Restore a valid topological order after in-place rewrites, then verify
    # and regenerate the GraphModule's code.
    stable_topological_sort(gm.graph)
    gm.graph.lint()
    gm.recompile()

    return gm
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
def fuse_fx(gm: torch.fx.GraphModule, example_inputs):
    """Run structural fusion passes on *gm* prior to autograd.

    Permute fusions run only off-CPU; identity removal and conv/bn folding
    run only on CPU with autograd disabled.
    """
    is_cpu = is_cpu_device(example_inputs)

    fake_mode = detect_fake_mode(example_inputs)

    gm = sink_cat_after_pointwise(gm)
    if config.permute_fusion and not is_cpu:
        # For linear permute fusion, we need to check input info to identify
        # and perform proper permutation/transpose
        ShapeProp(gm, fake_mode=fake_mode).propagate(*example_inputs)
        gm = linear_permute_fusion(gm)
        gm = permute_linear_fusion(gm)
        gm = permute_matmul_fusion(gm)

    # make sure the autograd is disabled.
    if torch.is_grad_enabled():
        return gm
    if not is_cpu:
        return gm
    gm = remove_identity(gm)
    gm = fuse_conv_bn(gm)
    return gm
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
def fetch_attr(target: str, mod):
    """Resolve a dotted attribute path (e.g. ``"sub.weight"``) on *mod*.

    Args:
        target: Dot-separated attribute path, as stored in an FX ``get_attr``
            node's ``target``.
        mod: Root object (typically a module) to start the lookup from.

    Returns:
        The attribute found at the end of the path.

    Raises:
        RuntimeError: If any attribute along the path is missing.
    """
    target_atoms = target.split(".")
    attr_itr = mod
    for i, atom in enumerate(target_atoms):
        if not hasattr(attr_itr, atom):
            # Report the longest resolvable prefix to ease debugging.
            # (fix: "nonexistant" -> "nonexistent" in the error message)
            raise RuntimeError(
                f"Node referenced nonexistent target {'.'.join(target_atoms[:i])}"
            )
        attr_itr = getattr(attr_itr, atom)
    return attr_itr
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
def remove_identity(gm: torch.fx.GraphModule):
    """
    Removes all identity layers from the module.
    """

    class _DropIdentity(torch.fx.Transformer):
        # Replace every call to an nn.Identity submodule with its sole input.
        def call_module(self, target, args, kwargs):
            submod = self.submodules[target]
            if not isinstance(submod, nn.Identity):
                return super().call_module(target, args, kwargs)
            assert len(args) == 1
            return args[0]

    return _DropIdentity(gm).transform()
|
| 127 |
+
|
| 128 |
+
|
| 129 |
+
def fuse_conv_bn(gm: torch.fx.GraphModule, inplace=False):
    """
    Fuses Convolution/BN layers for inference purposes.

    Handles two shapes of graph: (1) conv module followed by a BatchNorm
    module, and (2) conv module followed by a functional F.batch_norm call
    with get_attr stats/params. Only fuses in eval mode.
    """
    modules_patterns = [
        (torch.nn.Conv1d, torch.nn.BatchNorm1d),
        (torch.nn.Conv2d, torch.nn.BatchNorm2d),
        (torch.nn.Conv3d, torch.nn.BatchNorm3d),
    ]
    module_function_patterns = [
        (torch.nn.Conv1d, F.batch_norm),
        (torch.nn.Conv2d, F.batch_norm),
        (torch.nn.Conv3d, F.batch_norm),
    ]
    modules = dict(gm.named_modules())
    # Case 1: conv module -> BN module.
    for pattern in modules_patterns:
        for node in gm.graph.nodes:
            if matches_module_pattern(pattern, node, modules):
                if len(node.args[0].users) > 1:  # Output of conv is used by other nodes
                    continue
                conv = modules[node.args[0].target]
                bn = modules[node.target]
                eval_mode = all(not n.training for n in [conv, bn])
                if not eval_mode:
                    continue
                if not bn.track_running_stats:
                    # No running stats to fold into the conv weights.
                    continue
                fused_conv = fuse_conv_bn_eval(conv, bn)
                # Swap in the fused conv and drop the BN node.
                replace_node_module(node.args[0], modules, fused_conv)
                node.replace_all_uses_with(node.args[0])
                gm.graph.erase_node(node)
    gm.graph.lint()
    # Case 2: conv module -> functional F.batch_norm(...).
    for pattern in module_function_patterns:
        for node in gm.graph.nodes:
            if matches_module_function_pattern(pattern, node, modules):
                # TODO: support kwargs.
                if len(node.args) != 8:
                    continue
                conv = modules[node.args[0].target]
                # batch_norm positional args: (input, running_mean, running_var,
                # weight, bias, training, momentum, eps)
                bn_training = node.args[5]
                bn_eps = node.args[7]
                if conv.training or bn_training:
                    continue
                if type(bn_eps) is not float:
                    continue
                # Stats/params must be plain get_attr nodes with a single user
                # so they can be read now and folded in safely.
                bn_args_is_constant = all(
                    n.op == "get_attr" and len(n.users) == 1 for n in node.args[1:5]
                )
                if not bn_args_is_constant:
                    continue
                bn_running_mean = fetch_attr(node.args[1].target, gm)
                bn_running_var = fetch_attr(node.args[2].target, gm)
                bn_weight = fetch_attr(node.args[3].target, gm)
                bn_bias = fetch_attr(node.args[4].target, gm)
                if bn_running_mean is None or bn_running_var is None:
                    continue
                # Fold BN statistics into a copy of the conv's weight/bias.
                fused_conv = copy.deepcopy(conv)
                fused_conv.weight, fused_conv.bias = fuse_conv_bn_weights(
                    fused_conv.weight,
                    fused_conv.bias,
                    bn_running_mean,
                    bn_running_var,
                    bn_eps,
                    bn_weight,
                    bn_bias,
                )
                replace_node_module(node.args[0], modules, fused_conv)
                node.replace_all_uses_with(node.args[0])
                gm.graph.erase_node(node)
    gm.graph.lint()
    gm.recompile()

    return gm
|
| 202 |
+
|
| 203 |
+
|
| 204 |
+
class NormalizedLinearNode:
    """Uniform accessor for an ``F.linear`` call_function node.

    Each accessor prefers the positional argument and falls back to the
    keyword form, so callers need not care how the call was written.
    """

    def __init__(self, node: torch.fx.Node) -> None:
        assert node.op == "call_function"
        assert node.target in [torch.nn.functional.linear]
        self.node: torch.fx.Node = node

    def get_input(self) -> torch.fx.Node:
        args = self.node.args
        return args[0] if len(args) > 0 else self.node.kwargs["input"]

    def get_weight(self) -> torch.fx.Node:
        args = self.node.args
        return args[1] if len(args) > 1 else self.node.kwargs["weight"]

    def get_bias(self) -> torch.fx.Node:
        args = self.node.args
        if len(args) > 2:
            return args[2]
        # bias is optional: missing keyword means no bias.
        return self.node.kwargs.get("bias")
|
| 227 |
+
|
| 228 |
+
|
| 229 |
+
class NormalizedMatmulNode:
    """Uniform accessor for a ``torch.bmm`` / ``torch.matmul`` node.

    Each accessor prefers the positional argument and falls back to the
    keyword form.
    """

    def __init__(self, node: torch.fx.Node) -> None:
        assert node.op == "call_function"
        assert node.target in [torch.bmm, torch.matmul]
        self.node: torch.fx.Node = node

    def get_input(self) -> torch.fx.Node:
        args = self.node.args
        return args[0] if len(args) > 0 else self.node.kwargs["input"]

    def get_other(self) -> torch.fx.Node:
        args = self.node.args
        return args[1] if len(args) > 1 else self.node.kwargs["other"]
|
| 246 |
+
|
| 247 |
+
|
| 248 |
+
def check_permute(node: torch.fx.Node):
    """Return True iff *node* is a permute that swaps only the last two dims.

    Reads the rank from the node's ``tensor_meta`` and accepts either the
    positional form ``permute(x, d0, d1, ...)`` or a ``permutation=`` keyword;
    negative dims are normalized modulo the rank.
    """
    ranks = len(node.meta["tensor_meta"].shape)
    if len(node.args) > 3:
        permutation = [node.args[i] % ranks for i in range(1, ranks + 1)]
    elif (
        "permutation" in node.kwargs
        and node.kwargs["permutation"] is not None
        and len(node.kwargs["permutation"]) > 2
    ):
        permutation = [i % ranks for i in node.kwargs["permutation"]]
    else:
        return False
    # The only accepted permutation: identity with the last two dims swapped.
    expected = list(range(ranks))
    expected[-2], expected[-1] = expected[-1], expected[-2]
    return permutation == expected
|
| 264 |
+
|
| 265 |
+
|
| 266 |
+
def sink_cat_after_pointwise(module: torch.fx.GraphModule) -> torch.fx.GraphModule:
    """Move a unary pointwise op (relu/tanh) from after a cat to before it.

    Rewrites ``pointwise(view*(cat(xs, dim)))`` into
    ``view*(cat([pointwise(x) for x in xs], dim))``; applying the pointwise op
    per input lets it fuse with the producers of xs. Mutates the graph in
    place and recompiles.
    """

    def one_user(node):
        # The node's single user, or None if it has 0 or >1 users.
        users = list(node.users)
        return users[0] if len(users) == 1 else None

    def is_view(node):
        view = {"view"}
        return node.op == "call_method" and node.target in view

    def is_pointwise_unary(node):
        pointwise = {torch.relu, torch.tanh, "relu", "tanh"}
        return node.op in {"call_function", "call_method"} and node.target in pointwise

    g = module.graph
    for node in g.nodes:
        if node.op != "call_function" or node.target != torch.cat:
            continue

        # Walk through a single-user chain of .view calls after the cat.
        cat_or_view = node
        while True:
            user = one_user(cat_or_view)
            if not user or not is_view(user):
                break
            cat_or_view = user

        if user and is_pointwise_unary(user):
            with g.inserting_before(node):

                # Normalize cat's (tensors, dim) regardless of call style.
                def cat_args(tensors, dim=0):
                    return tensors, dim

                tensors, dim = cat_args(*node.args, **node.kwargs)
                # Apply the pointwise op to each cat input individually ...
                new_tensors = [
                    g.create_node(user.op, user.target, args=(arg,), kwargs=user.kwargs)
                    for arg in tensors
                ]
                # ... then cat the results.
                new_cat = g.create_node(
                    "call_function", torch.cat, args=(new_tensors, dim)
                )
                # Consumers of the pointwise op now read the (viewed) cat;
                # consumers of the old cat read the new cat.
                user.replace_all_uses_with(cat_or_view)
                node.replace_all_uses_with(new_cat)
                g.erase_node(user)
                g.erase_node(node)
    g.lint()
    module.recompile()
    return module
|
| 312 |
+
|
| 313 |
+
|
| 314 |
+
def linear_permute_fusion(module: torch.fx.GraphModule) -> torch.fx.GraphModule:
    """Fuse ``linear(x).permute(..., -1, -2)`` into a single linear_transpose.

    Requires shape metadata from ShapeProp (check_permute reads tensor_meta).
    Mutates the graph in place and recompiles.
    """
    for node in module.graph.nodes:
        if (
            node.op == "call_method"
            and node.target == "permute"
            and check_permute(node)
        ):
            if len(node.args) > 0:
                input_node = node.args[0]
            else:
                input_node = node.kwargs["input"]
            if (
                input_node.op == "call_function"
                and input_node.target == torch.nn.functional.linear
            ):
                normalized = NormalizedLinearNode(input_node)
                input = normalized.get_input()
                weight = normalized.get_weight()
                bias = normalized.get_bias()
                with module.graph.inserting_before(node):
                    fused_node = module.graph.call_function(
                        linear_transpose, args=(input, weight, bias)
                    )
                    node.replace_all_uses_with(fused_node)
                    module.graph.erase_node(node)
                    # Drop the now-dead linear if nothing else consumed it.
                    if len(input_node.users) == 0:
                        module.graph.erase_node(input_node)

    module.graph.lint()
    module.recompile()
    return module
|
| 345 |
+
|
| 346 |
+
|
| 347 |
+
# Y1 = X * W^T + bias
# Y2 = Y1.permute(0, 2, 1)
# ---->
# Y2 = (W * X^T + bias.unsqueeze(-1))^T
def linear_transpose(
    input: torch.Tensor, weight: torch.Tensor, bias: Optional[torch.Tensor]
) -> torch.Tensor:
    """Compute linear(input, weight, bias) with the last two output dims
    already swapped, replacing an explicit permute after the linear."""
    out = torch.matmul(weight, input.transpose(-1, -2))
    if bias is not None:
        # Bias broadcasts along the (now transposed) last dimension.
        out = out + bias.unsqueeze(-1)
    return out
|
| 357 |
+
|
| 358 |
+
|
| 359 |
+
def permute_linear_fusion(module: torch.fx.GraphModule) -> torch.fx.GraphModule:
    """Fuse ``linear(x.permute(..., -1, -2), w, b)`` into transpose_linear.

    Requires shape metadata from ShapeProp (check_permute reads tensor_meta).
    Mutates the graph in place and recompiles.
    """
    for node in module.graph.nodes:
        if node.op == "call_function" and node.target == torch.nn.functional.linear:
            if len(node.args) > 0:
                input_node = node.args[0]
            else:
                input_node = node.kwargs["input"]
            if (
                input_node.op == "call_method"
                and input_node.target == "permute"
                and check_permute(input_node)
            ):
                normalized = NormalizedLinearNode(node)
                # The fused op takes the *pre-permute* tensor.
                if len(input_node.args) > 0:
                    input = input_node.args[0]
                else:
                    input = input_node.kwargs["input"]
                weight = normalized.get_weight()
                bias = normalized.get_bias()
                with module.graph.inserting_before(node):
                    fused_node = module.graph.call_function(
                        transpose_linear, args=(input, weight, bias)
                    )
                    node.replace_all_uses_with(fused_node)
                    module.graph.erase_node(node)
                    # Drop the now-dead permute if nothing else consumed it.
                    if len(input_node.users) == 0:
                        module.graph.erase_node(input_node)

    module.graph.lint()
    module.recompile()
    return module
|
| 390 |
+
|
| 391 |
+
|
| 392 |
+
def permute_matmul_fusion(module: torch.fx.GraphModule) -> torch.fx.GraphModule:
    """Fold last-two-dim permutes feeding bmm/matmul into transpose_matmul.

    Either operand (or both) may be a ``.permute`` of the last two dims; the
    flags Atrans/Btrans record which side(s) to transpose inside the fused op.
    Mutates the graph in place and recompiles.
    """
    for node in module.graph.nodes:
        if node.op == "call_function" and (
            node.target == torch.bmm or node.target == torch.matmul
        ):
            normalized = NormalizedMatmulNode(node)
            input_A_node = normalized.get_input()
            input_B_node = normalized.get_other()
            input_A = input_A_node
            input_B = input_B_node
            Atrans = Btrans = False
            if (
                input_A_node.op == "call_method"
                and input_A_node.target == "permute"
                and check_permute(input_A_node)
            ):
                Atrans = True
                # Use the pre-permute tensor as the fused op's input.
                if len(input_A_node.args) > 0:
                    input_A = input_A_node.args[0]
                else:
                    input_A = input_A_node.kwargs["input"]

            if (
                input_B_node.op == "call_method"
                and input_B_node.target == "permute"
                and check_permute(input_B_node)
            ):
                Btrans = True
                if len(input_B_node.args) > 0:
                    input_B = input_B_node.args[0]
                else:
                    input_B = input_B_node.kwargs["input"]

            if Atrans or Btrans:
                with module.graph.inserting_before(node):
                    fused_node = module.graph.call_function(
                        transpose_matmul,
                        args=(input_A, input_B, Atrans, Btrans),
                    )
                node.replace_all_uses_with(fused_node)
                module.graph.erase_node(node)
                # Drop permute nodes that became dead after the rewrite.
                if Atrans and len(input_A_node.users) == 0:
                    module.graph.erase_node(input_A_node)
                if Btrans and len(input_B_node.users) == 0:
                    module.graph.erase_node(input_B_node)

    module.graph.lint()
    module.recompile()
    return module
|
| 441 |
+
|
| 442 |
+
|
| 443 |
+
# X1 = X.permute(0, 2, 1)
# Y1 = X1 * W1^T + bias1
# ---->
# Y2 = X1.transpose(-1, -2) * W1^T + bias1
def transpose_linear(
    input: torch.Tensor, weight: torch.Tensor, bias: Optional[torch.Tensor]
) -> torch.Tensor:
    """Apply linear() to the input with its last two dims swapped, replacing
    an explicit permute before the linear."""
    out = torch.matmul(input.transpose(-1, -2), weight.t())
    if bias is not None:
        out = out + bias
    return out
|
| 453 |
+
|
| 454 |
+
|
| 455 |
+
def transpose_matmul(A: torch.Tensor, B: torch.Tensor, Atrans: bool, Btrans: bool):
    """matmul with optional last-two-dim transposes folded into the call.

    Atrans/Btrans select whether A and/or B are transposed first; used as the
    replacement target by permute_matmul_fusion.
    """
    lhs = A.transpose(-1, -2) if Atrans else A
    rhs = B.transpose(-1, -2) if Btrans else B
    return torch.matmul(lhs, rhs)
|
llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/quantization.py
ADDED
|
@@ -0,0 +1,1020 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import copy
|
| 2 |
+
import functools
|
| 3 |
+
import math
|
| 4 |
+
import operator
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
from ..lowering import lowerings as L, require_channels_last
|
| 8 |
+
from ..pattern_matcher import Arg, CallFunction, filter_nodes, KeywordArg, ListOf, Match
|
| 9 |
+
from ..utils import pad_listlike
|
| 10 |
+
from .freezing_patterns import register_freezing_graph_pattern
|
| 11 |
+
from .post_grad import register_lowering_pattern
|
| 12 |
+
|
| 13 |
+
# Handles to the operator namespaces referenced by the patterns below.
aten = torch.ops.aten
prims = torch.ops.prims
quantized_decomposed = torch.ops.quantized_decomposed
quantized = torch.ops.quantized

"""
dequantize activation:
    x = x.to(fp32)
    x = x - zero_point
    x = x * scale
"""
# Decomposed per-tensor activation dequant: (x.to(x_dq_dtype) - x_zp) * x_scale.
dequantize_per_tensor_activation_pattern = CallFunction(
    aten.mul.Tensor,
    CallFunction(
        aten.sub.Tensor,
        CallFunction(
            prims.convert_element_type.default,
            KeywordArg("x"),
            KeywordArg("x_dq_dtype"),
        ),
        KeywordArg("x_zp"),
    ),
    KeywordArg("x_scale"),
)

# Per-channel weight dequant node emitted by the quantization decompositions.
dequantize_per_channel_weight_pattern = CallFunction(
    quantized_decomposed.dequantize_per_channel.default,
    KeywordArg("q_weight"),
    KeywordArg("w_scale"),
    KeywordArg("w_zp"),
    KeywordArg("w_axis"),
    KeywordArg("w_quant_min"),
    KeywordArg("w_quant_max"),
    KeywordArg("w_dtype"),
)

# Same as above but with a clone (memory-format change) on top of the dequant.
dequantize_per_channel_clone_weight_pattern = CallFunction(
    aten.clone.default,
    dequantize_per_channel_weight_pattern,
    memory_format=KeywordArg("memory_format"),
)

# onednn qconv2d_pointwise call as produced by the weight-prepack pass
# (fp32 output, no post op fused yet).
dequantize_qconv_pt2e_pattern = CallFunction(
    torch.ops.onednn.qconv2d_pointwise.default,
    KeywordArg("x"),
    KeywordArg("x_scale"),  # x_scale
    KeywordArg("x_zp"),  # x_zp
    KeywordArg("packed_weight"),  # packed_weight
    KeywordArg("w_scale"),  # w_scale
    KeywordArg("w_zp"),  # w_zp
    KeywordArg("b"),  # bias
    KeywordArg("stride"),
    KeywordArg("padding"),
    KeywordArg("dilation"),
    KeywordArg("groups"),
    KeywordArg("inv_output_scale"),  # inv_output_scale = 1.0
    KeywordArg("output_zero_point"),  # output_zero_point = 0
    KeywordArg("fp32_output"),  # fp32_output = True
    KeywordArg("attr"),  # attr = "none"
    Arg(),  # scalars
    Arg(),  # algorithm
)

# onednn qlinear_pointwise call as produced by the weight-prepack pass.
qlinear_pt2e_pattern = CallFunction(
    torch.ops.onednn.qlinear_pointwise.default,
    KeywordArg("x"),
    KeywordArg("x_scale"),
    KeywordArg("x_zp"),
    KeywordArg("packed_weight"),
    KeywordArg("w_scale"),
    KeywordArg("w_zp"),
    KeywordArg("b"),
    KeywordArg("output_scale"),
    KeywordArg("output_zero_point"),
    KeywordArg("fp32_output"),
    KeywordArg("postop_name"),
    KeywordArg("postop_args"),
    KeywordArg("postop_algorithm"),
)

# Per-tensor dequant of the accumulator (extra input) used in binary fusion.
dequantize_accum_pattern = CallFunction(
    aten.mul.Tensor,
    CallFunction(
        aten.sub.Tensor,
        CallFunction(
            prims.convert_element_type.default,
            KeywordArg("accum"),
            KeywordArg("accum_dq_dtype"),
        ),
        KeywordArg("accum_zp"),
    ),
    KeywordArg("accum_scale"),
)
|
| 106 |
+
|
| 107 |
+
|
| 108 |
+
def generate_pattern_with_binary(binary_post_op, computation_call, extra_input_pattern):
    """Build a pattern fusing *binary_post_op* over the computation output
    and an extra input pattern."""
    return CallFunction(binary_post_op, computation_call, extra_input_pattern)
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
def generate_pattern_with_unary(computation_call, unary_post_op):
    """Optionally wrap *computation_call* with *unary_post_op*.

    A ``None`` post op means "no unary op": the computation pattern is
    returned unchanged.
    """
    if unary_post_op is None:
        return computation_call
    return CallFunction(unary_post_op, computation_call)
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
def generate_pattern_with_output_quant(computation_call):
    """
    Wrap *computation_call* with the decomposed output-quantization pattern:

        output = round(output * o_inv_scale)
        output = output + zero_point
        output = clamp_min(output, 0)
        output = clamp_max(output, 127)
        output = output.to(uint8)
    """
    scaled = CallFunction(
        aten.mul.Tensor, computation_call, KeywordArg("o_inv_scale")
    )
    rounded = CallFunction(aten.round.default, scaled)
    shifted = CallFunction(aten.add.Tensor, rounded, KeywordArg("o_zp"))
    clamped_low = CallFunction(
        aten.clamp_min.default, shifted, KeywordArg("o_qmin")
    )
    clamped = CallFunction(
        aten.clamp_max.default, clamped_low, KeywordArg("o_qmax")
    )
    return CallFunction(
        prims.convert_element_type.default, clamped, KeywordArg("o_dtype")
    )
|
| 159 |
+
|
| 160 |
+
|
| 161 |
+
def _register_quantized_conv_lowering(
    pattern,
    pass_number,
    computation_op,
    fp32_output,
    unary_attr,
):
    """Register a lowering of the matched qconv + output-quant *pattern* to
    *computation_op*, fusing the unary post op described by *unary_attr*."""

    @register_lowering_pattern(pattern, pass_number=pass_number)
    def qconv(match: Match, *args, **kwargs):
        # The weight-prepack phase is expected to have produced an
        # int8-in / fp32-out qconv with no post op fused yet.
        assert kwargs["fp32_output"] is True
        assert kwargs["attr"] == "none"
        return L[computation_op](
            # Activation and its quantization params
            kwargs["x"],
            kwargs["x_scale"],
            kwargs["x_zp"],
            # Prepacked weight and its quantization params
            kwargs["packed_weight"],
            kwargs["w_scale"],
            kwargs["w_zp"],
            # Convolution params
            kwargs["b"],
            kwargs["stride"],
            kwargs["padding"],
            kwargs["dilation"],
            kwargs["groups"],
            # Output quantization params
            kwargs["o_inv_scale"],
            kwargs["o_zp"],
            fp32_output,
            # Unary post-op description
            unary_attr.op_name,
            unary_attr.scalars_attr,
            unary_attr.algorithm_attr,
        )

    return qconv
|
| 223 |
+
|
| 224 |
+
|
| 225 |
+
def _register_quantized_linear_lowering(
    pattern,
    pass_number,
    computation_op,
    fp32_output,
    unary_attr,
):
    """Register a lowering of the matched qlinear + output-quant *pattern* to
    *computation_op*, fusing the unary post op described by *unary_attr*."""

    @register_lowering_pattern(pattern, pass_number=pass_number)
    def qlinear(match: Match, *args, **kwargs):
        # The weight-prepack phase is expected to have produced an
        # int8-in / fp32-out qlinear with no post op fused yet.
        assert kwargs["fp32_output"] is True
        assert kwargs["postop_name"] == "none"
        return L[computation_op](
            # Activation and its quantization params
            kwargs["x"],
            kwargs["x_scale"],
            kwargs["x_zp"],
            # Prepacked weight and its quantization params
            kwargs["packed_weight"],
            kwargs["w_scale"],
            kwargs["w_zp"],
            kwargs.get("b"),  # bias is optional in the matched pattern
            # Output quantization params
            kwargs["o_inv_scale"],
            kwargs["o_zp"],
            fp32_output,
            # Unary post-op description
            unary_attr.op_name,
            unary_attr.scalars_attr,
            unary_attr.algorithm_attr,
        )

    return qlinear
|
| 280 |
+
|
| 281 |
+
|
| 282 |
+
def _register_quantized_conv_binary_lowering(
    pattern,
    pass_number,
    computation_op,
    fp32_output,
    binary_unary_attr,
):
    """Register a lowering of the matched qconv + binary (+ optional unary)
    + output-quant *pattern* to *computation_op*."""

    @register_lowering_pattern(pattern, pass_number=pass_number)
    def qconv_binary(match: Match, *args, **kwargs):
        return L[computation_op](
            # Activation and its quantization params
            kwargs["x"],
            kwargs["x_scale"],
            kwargs["x_zp"],
            # Extra (accumulator) input and its quantization params
            kwargs["accum"],
            kwargs["accum_scale"],
            kwargs["accum_zp"],
            # Prepacked weight and its quantization params
            kwargs["packed_weight"],
            kwargs["w_scale"],
            kwargs["w_zp"],
            # Convolution params
            kwargs["b"],
            kwargs["stride"],
            kwargs["padding"],
            kwargs["dilation"],
            kwargs["groups"],
            # Output quantization params
            kwargs["o_inv_scale"],
            kwargs["o_zp"],
            fp32_output,
            # Binary + unary post-op description
            binary_unary_attr.binary_op_name,
            binary_unary_attr.alpha,
            binary_unary_attr.unary_op_name,
            binary_unary_attr.scalars_attr,
            binary_unary_attr.algorithm_attr,
        )

    return qconv_binary
|
| 341 |
+
|
| 342 |
+
|
| 343 |
+
def _register_quantization_unary_fusion():
    """Register lowerings of qconv2d/qlinear followed by output quantization,
    optionally with a unary post op (currently none/relu) fused in."""

    class UnaryAttr:
        # Describes the unary post op handed to the onednn computation op.
        def __init__(self, op_name: str, scalars_attr=None, algorithm_attr=None):
            self.op_name = op_name
            self.scalars_attr = scalars_attr or []
            self.algorithm_attr = algorithm_attr or ""

    # (post-op name, aten op to match; None means no unary op in the pattern)
    unary_post_ops = (("none", None), ("relu", aten.relu.default))

    # Register qconv2d patterns for ExternKernel lowering.
    for op_name, post_op in unary_post_ops:
        _register_quantized_conv_lowering(
            generate_pattern_with_output_quant(
                generate_pattern_with_unary(dequantize_qconv_pt2e_pattern, post_op)
            ),
            2 if op_name == "none" else 1,  # pass_number
            torch.ops.onednn.qconv2d_pointwise,  # computation_op
            False,  # fp32_output
            UnaryAttr(op_name, [], ""),  # unary_attr
        )

    # Register qlinear patterns for ExternKernel lowering.
    for op_name, post_op in unary_post_ops:
        _register_quantized_linear_lowering(
            generate_pattern_with_output_quant(
                generate_pattern_with_unary(qlinear_pt2e_pattern, post_op)
            ),
            2 if op_name == "none" else 1,  # pass_number
            torch.ops.onednn.qlinear_pointwise,  # computation_op
            False,  # fp32_output
            UnaryAttr(op_name, [], ""),  # unary_attr
        )
|
| 388 |
+
|
| 389 |
+
|
| 390 |
+
def _register_quantization_binary_fusion():
    """Register lowerings of qconv2d + add (+ optional relu) followed by
    output quantization."""

    class BinaryUnaryAttr:
        # Describes the binary post op plus an optional trailing unary op.
        def __init__(
            self,
            binary_op_name: str,
            alpha=None,
            unary_op_name: str = "none",
            scalars_attr=None,
            algorithm_attr=None,
        ):
            self.binary_op_name = binary_op_name
            self.alpha = alpha or 1.0
            self.unary_op_name = unary_op_name
            self.scalars_attr = scalars_attr or []
            self.algorithm_attr = algorithm_attr or ""

    for unary_name, unary_post_op in (("none", None), ("relu", aten.relu.default)):
        fused_pattern = generate_pattern_with_output_quant(
            generate_pattern_with_unary(
                generate_pattern_with_binary(
                    aten.add.Tensor,
                    dequantize_qconv_pt2e_pattern,
                    dequantize_accum_pattern,
                ),
                unary_post_op,
            )
        )
        # Register qconv2d_binary_unary pattern for ExternKernel lowering.
        _register_quantized_conv_binary_lowering(
            fused_pattern,
            1 if unary_name == "none" else 0,  # pass_number
            torch.ops.onednn.qconv2d_pointwise.binary,  # computation_op
            False,  # fp32_output
            BinaryUnaryAttr("add", 1.0, unary_name, [], ""),  # binary_unary_attr
        )
|
| 435 |
+
|
| 436 |
+
|
| 437 |
+
def _is_valid_quantized_maxpool2d_optimization_pattern():
    """Return an extra-check callable for the quantized maxpool2d pattern."""

    def fn(match):
        # max_pool2d_with_indices returns (values, indices); only match when
        # the getitem in the pattern selects the values output (index 0).
        get_item_node, *_ = filter_nodes(match.nodes, operator.getitem)
        return get_item_node.args[1] == 0

    return fn
|
| 445 |
+
|
| 446 |
+
|
| 447 |
+
def _register_quantized_maxpool2d_lowering(
    pattern,
    computation_op,
):
    """Register a lowering of the matched quantized maxpool2d *pattern* to
    *computation_op*, normalizing optional pool parameters first."""

    @register_lowering_pattern(
        pattern,
        extra_check=_is_valid_quantized_maxpool2d_optimization_pattern(),
    )
    def qmaxpool2d(match: Match, *args, **kwargs):
        x = kwargs["x"]
        kernel_size = kwargs["kernel_size"]
        # Optional args take nn.MaxPool2d defaults when absent from the match.
        stride = kwargs.get("stride")
        padding = kwargs.get("padding", 0)
        dilation = kwargs.get("dilation", 1)
        ceil_mode = kwargs.get("ceil_mode", False)

        # Normalize scalar defaults to per-dimension lists.
        if padding == 0:
            padding = [0, 0]
        if dilation == 1:
            dilation = [1, 1]
        stride = stride or kernel_size
        kernel_size = pad_listlike(kernel_size, 2)
        stride = pad_listlike(stride, 2)
        padding = pad_listlike(padding, 2)
        dilation = pad_listlike(dilation, 2)

        for param in (kernel_size, stride, padding, dilation):
            assert len(param) == 2

        computation_args = (
            x,
            kernel_size,
            stride,
            padding,
            dilation,
            ceil_mode,
        )
        computation_args, _ = require_channels_last(computation_op, *computation_args)
        return L[computation_op](*computation_args)

    return qmaxpool2d
|
| 491 |
+
|
| 492 |
+
|
| 493 |
+
def _register_quantization_maxpool2d():
    """Register quantized maxpool2d lowerings for every arg-count variant."""
    # Dynamo export only emits the max_pool2d args the user explicitly passed,
    # so assigning a different number of default parameters yields a different
    # number of input nodes and hence a different pattern to be matched.
    # Refer to the issue: https://github.com/pytorch/pytorch/issues/105901
    optional_kwarg_names = ["stride", "padding", "dilation", "ceil_mode"]
    for prefix_len in range(1, len(optional_kwarg_names) + 1):
        extra_args = [
            KeywordArg(name) for name in optional_kwarg_names[:prefix_len]
        ]
        dequantize_maxpool2d_pattern = CallFunction(
            aten.max_pool2d_with_indices.default,
            dequantize_per_tensor_activation_pattern,
            KeywordArg("kernel_size"),
            *extra_args,
        )
        dequantize_maxpool2d_get_item_pattern = CallFunction(
            operator.getitem,
            dequantize_maxpool2d_pattern,
            Arg(),
        )
        _register_quantized_maxpool2d_lowering(
            generate_pattern_with_output_quant(dequantize_maxpool2d_get_item_pattern),
            quantized.max_pool2d,
        )
|
| 536 |
+
|
| 537 |
+
|
| 538 |
+
def _is_valid_quantized_cat_optimization_pattern():
    """Return an extra-check callable for the quantized cat pattern."""

    def fn(match):
        # All inputs and the output must share one scale and one zero point.
        # Step 1: zero points come from the input sub nodes plus the single
        # add node of the output-quant pattern.
        sub_nodes = filter_nodes(match.nodes, aten.sub.Tensor)
        add_nodes = filter_nodes(match.nodes, aten.add.Tensor)
        assert len(add_nodes) == 1, "expect only 1 add node at output quant pattern"
        zero_points = [node.args[1] for node in sub_nodes]
        zero_points.append(add_nodes[0].args[1])
        reference_zp = zero_points[0]
        if any(zp != reference_zp for zp in zero_points):
            return False

        # Step 2: scales. The mul feeding the cat output directly multiplies
        # by the reciprocal of the scale, so input-side muls are inverted
        # before comparison. The output mul is the one whose first arg is the
        # cat node itself.
        scales = []
        for mul_node in filter_nodes(match.nodes, aten.mul.Tensor):
            if mul_node.args[0].target is aten.cat.default:
                scales.append(mul_node.args[1])
            else:
                scales.append(1.0 / mul_node.args[1])
        return all(math.isclose(s, scales[0], rel_tol=1e-5) for s in scales)

    return fn
|
| 568 |
+
|
| 569 |
+
|
| 570 |
+
def _register_quantized_cat_lowering(
    pattern,
    computation_op,
):
    """Register a lowering of the matched quantized cat *pattern* to
    *computation_op* operating directly on the uint8 inputs."""

    @register_lowering_pattern(
        pattern,
        extra_check=_is_valid_quantized_cat_optimization_pattern(),
    )
    def qcat(match: Match, inputs, dim, **kwargs):
        # Each entry of `inputs` is [x, x_dq_dtype, x_zp, x_scale]; the cat is
        # lowered on the raw uint8 tensors, dropping the dequant qparams.
        uint8_inputs = [dequant_args[0] for dequant_args in inputs]
        return L[computation_op](uint8_inputs, dim)

    return qcat
|
| 584 |
+
|
| 585 |
+
|
| 586 |
+
# Positional (Arg) variant of the per-tensor dequant pattern, used where the
# argument values are captured as a group (e.g. inside ListOf for cat inputs):
# matches (x.to(dtype) - zero_point) * scale.
_raw_dequantize_per_tensor_activation_pattern = CallFunction(
    aten.mul.Tensor,
    CallFunction(
        aten.sub.Tensor,
        CallFunction(
            prims.convert_element_type.default,
            Arg(),
            Arg(),
        ),
        Arg(),
    ),
    Arg(),
)
|
| 599 |
+
|
| 600 |
+
|
| 601 |
+
def _register_quantization_cat():
    """Register the lowering for cat over dequantized uint8 inputs with a
    requantized output."""
    _register_quantized_cat_lowering(
        generate_pattern_with_output_quant(
            CallFunction(
                aten.cat.default,
                ListOf(_raw_dequantize_per_tensor_activation_pattern),
                KeywordArg("dim"),
            )
        ),
        aten.cat,
    )
|
| 611 |
+
|
| 612 |
+
|
| 613 |
+
def _register_quantization_lowerings():
    """Register all quantization pattern lowerings."""
    for register in (
        _register_quantization_unary_fusion,
        _register_quantization_binary_fusion,
        _register_quantization_maxpool2d,
        _register_quantization_cat,
    ):
        register()
|
| 618 |
+
|
| 619 |
+
|
| 620 |
+
def _is_valid_dequant_promotion_pattern(match):
    """Accept the match only when the dequant chain (to_fp32 -> sub -> mul)
    has more than one user and therefore should be promoted (duplicated)."""
    mul_node = match.output_node()
    sub_node = mul_node.args[0]
    to_fp32_node = sub_node.args[0]
    return (
        mul_node.target is aten.mul.Tensor
        and sub_node.target is aten.sub.Tensor
        and to_fp32_node.target is prims.convert_element_type.default
        and len(list(mul_node.users)) > 1
    )
|
| 633 |
+
|
| 634 |
+
|
| 635 |
+
def _register_dequant_promotion_pass(pattern, pass_number):
    """Register a freezing-graph pass that duplicates a shared dequant chain.

    When one dequant chain (convert_element_type -> sub -> mul) feeds multiple
    users, each user is rewired to a private copy of the chain so that later
    fusion passes can match and consume one dequant pattern per user.
    """

    @register_freezing_graph_pattern(
        pattern,
        extra_check=_is_valid_dequant_promotion_pattern,
        pass_number=pass_number,
    )
    def dequant_promotion(match: Match, *args, **kwargs):
        # If the dequant pattern is used by multiple nodes, do dequant
        # promotion so each user node has a separate dequant pattern connected.
        def clone_to_new_node(graph, source_node, user_node):
            # Clone source_node just before user_node and rewire user_node
            # to consume the clone instead of the shared original.
            assert (
                source_node.op == "call_function"
            ), "clone_to_new_node only support node.op call_function"
            with graph.inserting_before(user_node):
                new_node = graph.call_function(
                    source_node.target,
                    args=source_node.args,
                    kwargs=source_node.kwargs,
                )
                new_node.meta = copy.copy(source_node.meta)
                user_node.replace_input_with(source_node, new_node)
            return new_node

        mul_node = match.output_node()
        sub_node = mul_node.args[0]
        to_fp32_node = sub_node.args[0]
        assert mul_node.target is aten.mul.Tensor
        assert sub_node.target is aten.sub.Tensor
        assert to_fp32_node.target is prims.convert_element_type.default

        graph = match.graph
        # Snapshot the user list: cloning mutates mul_node.users.
        user_node_list = list(mul_node.users)
        for user_node in user_node_list:
            # Step1: Duplicate the mul node
            new_mul_node = clone_to_new_node(graph, mul_node, user_node)
            # Step2: Duplicate the sub node
            new_sub_node = clone_to_new_node(graph, sub_node, new_mul_node)
            # Step3: Duplicate the to_fp32 node
            _ = clone_to_new_node(graph, to_fp32_node, new_sub_node)
|
| 674 |
+
|
| 675 |
+
|
| 676 |
+
def _is_valid_dequant_conv2d_pattern(match):
    """Accept the matched dequant+convolution pattern only when weight prepack
    is safe to insert.

    Requirements:
      1. The convolution is 4-d (conv2d) on CPU — only conv2d lowering is
         supported.
      2. Every node of the dequant chain has exactly one user, because the
         chain is deleted after prepack.
    """
    conv_node = match.output_node()
    assert conv_node.target is aten.convolution.default
    meta_values = (
        conv_node.args[0].meta.get("val"),  # activation
        conv_node.args[1].meta.get("val"),  # weight
    )
    if any(
        mv is None or mv.device.type != "cpu" or mv.dim() != 4
        for mv in meta_values
    ):
        # Only support conv2d now
        return False

    mul_node = conv_node.args[0]
    sub_node = mul_node.args[0]
    to_fp32_node = sub_node.args[0]
    assert to_fp32_node.target is prims.convert_element_type.default
    assert sub_node.target is aten.sub.Tensor
    assert mul_node.target is aten.mul.Tensor
    # Each dequant node must have a single user since the whole chain will be
    # erased by the prepack pass.
    return all(
        len(list(node.users)) == 1 for node in (to_fp32_node, sub_node, mul_node)
    )
|
| 711 |
+
|
| 712 |
+
|
| 713 |
+
def _register_qconv_weight_prepack_pass(pattern, pass_number):
    """Register a freezing-graph pass that replaces dequant + convolution with
    a prepacked onednn qconv2d_pointwise call."""

    @register_freezing_graph_pattern(
        pattern,
        extra_check=_is_valid_dequant_conv2d_pattern,
        pass_number=pass_number,
    )
    def qconv_weight_prepack(match: Match, *args, **kwargs):
        """
        Match the pattern:
        int8 activation
          |
        dequant_per_tensor
          |
        Conv2d <- optional(aten.clone.default) <- dequant_per_channel <- int8_weight

        Insert weight prepack node and change the pattern to:
        int8 activation
          |
        onednn.qconv2d_pointwise <- onednn.qconv_prepack <- int8_weight
        """
        conv_node = match.output_node()
        assert conv_node.target is aten.convolution.default
        mul_node = conv_node.args[0]
        sub_node = mul_node.args[0]
        to_fp32_node = sub_node.args[0]
        # The freezing pass convert_conv_weights_to_channels_last may have
        # inserted a clone node between the conv and the per-channel dequant.
        has_clone_to_channel_last_node_in_pattern = (
            conv_node.args[1].target is aten.clone.default
        )
        clone_node = (
            conv_node.args[1] if has_clone_to_channel_last_node_in_pattern else None
        )
        dequant_per_channel = (
            clone_node.args[0]
            if has_clone_to_channel_last_node_in_pattern
            else conv_node.args[1]
        )
        assert (
            dequant_per_channel.target
            is quantized_decomposed.dequantize_per_channel.default
        )

        # Activation QParams
        qx, x_zp, x_scale = (
            kwargs["x"],
            kwargs["x_zp"],
            kwargs["x_scale"],
        )

        # Weight QParams
        qw, w_scale, w_zp = (
            kwargs["q_weight"],
            kwargs["w_scale"],
            kwargs["w_zp"],
        )

        # Conv Params
        bias, stride, padding, dilation, groups = (
            kwargs["b"],
            kwargs["stride"],
            kwargs["padding"],
            kwargs["dilation"],
            kwargs["groups"],
        )

        # Activation shape is needed by the prepack op.
        x_shape = qx.meta.get("tensor_meta").shape
        graph = match.graph
        with graph.inserting_before(conv_node):
            # Insert weight prepack node and the QConv node
            packed_weight_inputs = (
                qw,
                w_scale,
                x_scale,
                x_zp,
                stride,
                padding,
                dilation,
                groups,
                x_shape,
            )
            packed_weight_op = torch.ops.onednn.qconv_prepack
            prepack_weight_node = graph.call_function(
                packed_weight_op, args=packed_weight_inputs
            )

            # fp32-out qconv with no post op; later lowering passes fuse the
            # output quant and post ops into it.
            new_args = (
                qx,
                x_scale,
                x_zp,
                prepack_weight_node,
                w_scale,
                w_zp,
                bias,
                stride,
                padding,
                dilation,
                groups,
                1.0,  # inv_output_scale
                0,  # output_zero_point
                True,  # fp32_output
                "none",  # attr
                [],  # scalars
                "",  # algorithm
            )
            new_conv_node = graph.call_function(
                torch.ops.onednn.qconv2d_pointwise.default, args=new_args
            )
            conv_node.replace_all_uses_with(new_conv_node)
            new_conv_node.meta.update(conv_node.meta)

            # Erase the original conv node
            graph.erase_node(conv_node)
            # Erase the dequant pattern
            graph.erase_node(mul_node)
            graph.erase_node(sub_node)
            graph.erase_node(to_fp32_node)
            # Erase the dequant per channel pattern
            if clone_node is not None:
                graph.erase_node(clone_node)
            graph.erase_node(dequant_per_channel)
|
| 832 |
+
|
| 833 |
+
|
| 834 |
+
def _generate_dequant_convolution_node_pattern(_dequant_per_channel_pattern):
    """Build the pattern for ``aten.convolution`` fed by dequantized inputs.

    The activation side is the shared per-tensor dequant pattern; the weight
    side is supplied by the caller (per-channel dequant, optionally cloned).
    """
    return CallFunction(
        aten.convolution.default,
        dequantize_per_tensor_activation_pattern,
        _dequant_per_channel_pattern,
        *(
            KeywordArg(name)
            for name in (
                "b",
                "stride",
                "padding",
                "dilation",
                "is_transposed",
                "out_padding",
                "groups",
            )
        ),
    )
|
| 848 |
+
|
| 849 |
+
|
| 850 |
+
def _generate_qconv_weight_prepack_patterns():
    """Return the dequant->convolution patterns used for qconv weight prepack.

    Two weight-side variants are matched: the plain per-channel dequant, and
    the same dequant followed by a clone. The clone variant exists because the
    convert_conv_weights_to_channels_last pass
    (https://github.com/pytorch/pytorch/blob/07107919297db3f8ab37f11c12666b6d6d5f692e/torch/_inductor/freezing.py#L338-L362)
    may or may not insert a to(channels_last) node between convolution and the
    dequant_per_channel node, depending on heuristics.
    """
    return tuple(
        _generate_dequant_convolution_node_pattern(weight_pattern)
        for weight_pattern in (
            dequantize_per_channel_weight_pattern,
            dequantize_per_channel_clone_weight_pattern,
        )
    )
|
| 863 |
+
|
| 864 |
+
|
| 865 |
+
def _is_valid_dequant_linear_pattern(match):
    """Accept the match only if every node of the dequant chain has one user.

    The prepack replacement deletes the to_fp32 -> sub -> mul chain, which is
    only safe when no other node consumes those intermediates.
    """
    linear_node = match.output_node()
    assert linear_node.target in (aten.addmm.default, aten.mm.default)
    # aten.mm takes the activation first; aten.addmm takes the bias first.
    activation_index = 0 if linear_node.target is aten.mm.default else 1
    mul_node = linear_node.args[activation_index]
    sub_node = mul_node.args[0]
    to_fp32_node = sub_node.args[0]

    # Sanity-check the dequant chain shape the pattern should have matched.
    assert to_fp32_node.target is prims.convert_element_type.default
    assert sub_node.target is aten.sub.Tensor
    assert mul_node.target is aten.mul.Tensor

    # Each intermediate must feed exactly one consumer, since the replacement
    # pass erases all three nodes.
    return all(
        len(list(chain_node.users)) == 1
        for chain_node in (to_fp32_node, sub_node, mul_node)
    )
|
| 886 |
+
|
| 887 |
+
|
| 888 |
+
def _register_qlinear_weight_prepack_pass(pattern, pass_number):
    @register_freezing_graph_pattern(
        pattern,
        extra_check=_is_valid_dequant_linear_pattern,
        pass_number=pass_number,
    )
    def qlinear_weight_prepack(match: Match, *args, **kwargs):
        """
        Match the pattern:
        int8 activation
          |
        dequant_per_tensor
          |
        mm/addmm <- t <- dequant_per_channel <- int8_weight

        Insert weight prepack node and change the pattern to:
        int8 activation
          |
        onednn.qlinear_pointwise <- onednn.qlinear_prepack <- int8_weight
        """
        linear_node = match.output_node()
        assert linear_node.target in (aten.addmm.default, aten.mm.default)
        # aten.mm takes the activation first; aten.addmm takes the bias first,
        # shifting both the activation and weight positions by one.
        input_index = 0 if linear_node.target is aten.mm.default else 1
        weight_index = input_index + 1
        # Walk the activation dequant chain: mul <- sub <- to_fp32.
        mul_node = linear_node.args[input_index]
        sub_node = mul_node.args[0]
        to_fp32_node = sub_node.args[0]
        # Weight side: linear consumes a transpose of the per-channel dequant.
        t_node = linear_node.args[weight_index]
        dequant_per_channel = t_node.args[0]
        assert (
            dequant_per_channel.target
            is quantized_decomposed.dequantize_per_channel.default
        )

        # Activation QParams
        qx, x_zp, x_scale = (
            kwargs["x"],
            kwargs["x_zp"],
            kwargs["x_scale"],
        )

        # Weight QParams
        qw, w_scale, w_zp = (
            kwargs["q_weight"],
            kwargs["w_scale"],
            kwargs["w_zp"],
        )

        # Params
        # Bias is only captured by the addmm pattern variant.
        bias = kwargs["b"] if "b" in kwargs else None

        # Example activation shape, used by the prepack op as a size hint.
        x_shape = qx.meta.get("tensor_meta").shape
        graph = match.graph
        with graph.inserting_before(linear_node):
            # Insert weight prepack node and the qlinear node
            packed_weight_inputs = (
                qw,
                x_shape,
            )
            packed_weight_op = torch.ops.onednn.qlinear_prepack
            prepack_weight_node = graph.call_function(
                packed_weight_op, args=packed_weight_inputs
            )

            # Output requant params are identity (scale 1.0, zp 0) with
            # fp32_output=True: the replacement produces fp32, matching the
            # original dequant->linear sequence.
            new_args = (
                qx,
                x_scale,
                x_zp,
                prepack_weight_node,
                w_scale,
                w_zp,
                bias,
                1.0,  # output_scale
                0,  # output_zero_point
                True,  # fp32_output
                "none",  # post op name
                [],  # post op args
                "",  # post op algorithm
            )
            new_linear_node = graph.call_function(
                torch.ops.onednn.qlinear_pointwise.default, args=new_args
            )
            linear_node.replace_all_uses_with(new_linear_node)
            # Preserve metadata (example values etc.) on the replacement node.
            new_linear_node.meta.update(linear_node.meta)

            # Erase the original linear node
            graph.erase_node(linear_node)
            # Erase the dequant pattern
            # (users-first order; extra_check guaranteed single users).
            graph.erase_node(mul_node)
            graph.erase_node(sub_node)
            graph.erase_node(to_fp32_node)
            # Erase the dequant per channel pattern
            graph.erase_node(t_node)
            graph.erase_node(dequant_per_channel)
|
| 982 |
+
|
| 983 |
+
|
| 984 |
+
def _generate_dequant_linear_node_pattern(_dequant_per_channel_pattern):
    """Build (addmm, mm) patterns for a linear fed by dequantized inputs.

    The weight side is the caller-supplied per-channel dequant followed by a
    permute (the transpose emitted for linear). Returns the biased
    (aten.addmm) pattern first and the bias-free (aten.mm) pattern second.
    """
    transposed_weight = CallFunction(
        aten.permute.default,
        _dequant_per_channel_pattern,
        KeywordArg("permute_axes"),
    )
    with_bias = CallFunction(
        aten.addmm.default,
        KeywordArg("b"),
        dequantize_per_tensor_activation_pattern,
        transposed_weight,
    )
    without_bias = CallFunction(
        aten.mm.default,
        dequantize_per_tensor_activation_pattern,
        transposed_weight,
    )
    return with_bias, without_bias
|
| 1002 |
+
|
| 1003 |
+
|
| 1004 |
+
def _generate_qlinear_weight_prepack_patterns():
    """Return the (addmm, mm) dequant->linear patterns for qlinear prepacking."""
    patterns_pair = _generate_dequant_linear_node_pattern(
        dequantize_per_channel_weight_pattern
    )
    return patterns_pair
|
| 1006 |
+
|
| 1007 |
+
|
| 1008 |
+
@functools.lru_cache(None)
def _register_quantization_weight_pack_pass():
    """Register dequant promotion plus qconv/qlinear weight-prepack passes.

    Cached so repeated compilations register each pattern exactly once.
    Dequant promotion runs at pass_number=0 so it happens before the
    weight-prepack passes, which are all registered at pass_number=1.
    """
    _register_dequant_promotion_pass(
        dequantize_per_tensor_activation_pattern, pass_number=0
    )
    for conv_pattern in _generate_qconv_weight_prepack_patterns():
        _register_qconv_weight_prepack_pass(conv_pattern, pass_number=1)
    for linear_pattern in _generate_qlinear_weight_prepack_patterns():
        _register_qlinear_weight_prepack_pass(linear_pattern, pass_number=1)
|
llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/replace_random.py
ADDED
|
@@ -0,0 +1,125 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import collections
|
| 2 |
+
import logging
|
| 3 |
+
|
| 4 |
+
import torch
|
| 5 |
+
|
| 6 |
+
from torch.fx.passes.shape_prop import _extract_tensor_metadata
|
| 7 |
+
from .. import config, inductor_prims
|
| 8 |
+
from ..pattern_matcher import (
|
| 9 |
+
CallFunctionVarArgs,
|
| 10 |
+
Match,
|
| 11 |
+
PatternMatcherPass,
|
| 12 |
+
register_graph_pattern,
|
| 13 |
+
)
|
| 14 |
+
from ..virtualized import V
|
| 15 |
+
|
| 16 |
+
log = logging.getLogger(__name__)
|
| 17 |
+
patterns = PatternMatcherPass()
|
| 18 |
+
aten = torch.ops.aten
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def replace_random_passes(gm: torch.fx.GraphModule):
    """Modify the given FX graph to use backend-native random ops"""
    # When the user asked for eager-compatible RNG, leave the graph untouched.
    if config.fallback_random:
        return 0
    # Total replacements: pattern-based op rewrites plus fused seed creations.
    total = patterns.apply(gm) + fuse_seed_creation_pass(gm.graph)
    return total
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def fuse_seed_creation_pass(graph: torch.fx.Graph):
    """
    Horizontally fuse all the seed generation on each device

    a = inductor_seed(dev)
    b = inductor_seed(dev)

    Becomes:
    seeds = inductor_seeds(2, dev)
    a = inductor_lookup_seed(seeds, 0)
    b = inductor_lookup_seed(seeds, 1)

    We do this because seed creation is entirely launch overhead bound.

    Returns the number of devices whose seed nodes were fused (0 if none).
    """
    # Bucket every inductor_prims.seed(dev) call node by its device argument.
    device_seeds = collections.defaultdict(list)
    for node in graph.nodes:
        if CallFunctionVarArgs(inductor_prims.seed).match(node):
            device_seeds[node.args[0]].append(node)

    if not device_seeds:
        return 0

    for device, seeds in device_seeds.items():
        # Create one fused seeds tensor ahead of the first per-device seed.
        with graph.inserting_before(seeds[0]):
            combined = graph.call_function(inductor_prims.seeds, (len(seeds), device))
            # Attach fake-tensor metadata so downstream passes can read
            # shape/dtype information for the newly inserted node.
            with V.fake_mode:
                combined.meta["val"] = torch.empty(
                    [len(seeds)], device=device, dtype=torch.int64
                )
                combined.meta["tensor_meta"] = _extract_tensor_metadata(
                    combined.meta["val"]
                )

        # Rewrite each original seed node as a lookup into the fused tensor,
        # preserving its metadata, then drop it from the graph.
        for idx, seed in enumerate(seeds):
            with graph.inserting_before(seed):
                new_seed = graph.call_function(
                    inductor_prims.lookup_seed, (combined, idx)
                )
            seed.replace_all_uses_with(new_seed)
            new_seed.meta.update(seed.meta)
            graph.erase_node(seed)

    return len(device_seeds)
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
def default_kwargs(device):
    """Extra kwargs for the backend random ops; currently none for any device."""
    return {}
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
def get_device(device):
    """Return ``device`` unchanged, or the current default device when None."""
    if device is None:
        # Materialize an empty scalar tensor to discover the default device.
        return torch.empty([]).device
    return device
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
@register_graph_pattern(CallFunctionVarArgs(aten.rand.default), pass_dict=patterns)
@register_graph_pattern(CallFunctionVarArgs(aten.randn.default), pass_dict=patterns)
def replace_random(
    match: Match, size, *, dtype=None, device=None, layout=None, pin_memory=None
):
    """Rewrite aten.rand/aten.randn into inductor's seeded random primitive."""
    # The distribution mode is keyed off which overload was matched.
    mode_by_target = {
        aten.rand.default: "rand",
        aten.randn.default: "randn",
    }
    mode = mode_by_target[match.output_node().target]
    device = get_device(device)

    def replacement(size):
        generated = inductor_prims.random(
            size, inductor_prims.seed(device), mode, **default_kwargs(device)
        )
        # Honor an explicit dtype request; otherwise keep the default dtype.
        return generated if dtype is None else generated.to(dtype)

    match.replace_by_example(replacement, [size])
|
| 106 |
+
|
| 107 |
+
|
| 108 |
+
@register_graph_pattern(CallFunctionVarArgs(aten.randint.low), pass_dict=patterns)
def replace_randint(
    match: Match,
    low,
    high,
    size,
    *,
    dtype=torch.int64,
    device=None,
    layout=None,
    pin_memory=None,
):
    """Rewrite aten.randint into inductor's seeded randint primitive."""
    device = get_device(device)

    def replacement(size):
        drawn = inductor_prims.randint(low, high, size, inductor_prims.seed(device))
        # aten.randint defaults to int64; cast to the requested dtype.
        return drawn.to(dtype)

    match.replace_by_example(replacement, [size])
|
llava_next/lib/python3.10/site-packages/torch/_inductor/fx_passes/split_cat.py
ADDED
|
@@ -0,0 +1,982 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import itertools
|
| 2 |
+
import logging
|
| 3 |
+
import operator
|
| 4 |
+
from typing import Callable, List, Sequence, Tuple, Union
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
from torch._dynamo.utils import counters
|
| 8 |
+
|
| 9 |
+
from ..pattern_matcher import (
|
| 10 |
+
Arg,
|
| 11 |
+
CallFunction,
|
| 12 |
+
CallFunctionVarArgs,
|
| 13 |
+
CallMethodVarArgs,
|
| 14 |
+
config_flag,
|
| 15 |
+
FailedMatch,
|
| 16 |
+
get_arg_value,
|
| 17 |
+
Ignored,
|
| 18 |
+
KeywordArg,
|
| 19 |
+
ListOf,
|
| 20 |
+
Match,
|
| 21 |
+
MatchContext,
|
| 22 |
+
MULTIPLE,
|
| 23 |
+
PatternExpr,
|
| 24 |
+
register_graph_pattern,
|
| 25 |
+
RepeatedExpr,
|
| 26 |
+
)
|
| 27 |
+
from .pre_grad import (
|
| 28 |
+
merge_splits_pass,
|
| 29 |
+
normalization_pass,
|
| 30 |
+
split_cat_pass,
|
| 31 |
+
unbind_stack_pass,
|
| 32 |
+
)
|
| 33 |
+
|
| 34 |
+
log = logging.getLogger(__name__)
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
def _get_split_args_default(split_node):
    """Extract ``(input, split_size, dim)`` from a split call node.

    ``Tensor.split`` (call_method) names its size argument ``split_size``
    while ``torch.split`` (call_function) uses ``split_size_or_sections``.
    A missing (or falsy) dim argument falls back to 0.
    """
    size_kwarg = (
        "split_size" if split_node.op == "call_method" else "split_size_or_sections"
    )
    tensor_arg = get_arg_value(split_node, 0, "tensor")
    size_arg = get_arg_value(split_node, 1, size_kwarg)
    dim_arg = get_arg_value(split_node, 2, "dim") or 0
    return tensor_arg, size_arg, dim_arg
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
def normalize_split_base(match: Match, _get_split_args: Callable):
    """
    Normalize split with split_size into split_with_sizes, so that we only deal with one type of split in
    subsequent optimizations
    """
    split_node = match.nodes[0]
    graph = match.graph
    # _get_split_args abstracts over torch.split vs Tensor.split arg layouts.
    split_input, split_size, split_dim = _get_split_args(split_node)
    if split_input is None or split_dim is None or split_size is None:
        log.info("couldn't find split args")
        return
    if "example_value" not in split_node.meta:
        log.warning("example value absent for node: %s", split_node)
        return
    assert isinstance(split_node.meta["example_value"], (list, tuple))
    # Derive explicit per-chunk sizes from the example outputs so the rewrite
    # always uses the sections form of torch.split.
    split_sections = [t.size()[split_dim] for t in split_node.meta["example_value"]]

    if any(isinstance(section, torch.SymInt) for section in split_sections):
        # TODO dynamic_shapes with assume_static_by_default=False fails while AOT Autograd tracing.
        return
    if split_dim < 0:  # Normalize split dim
        split_dim += split_input.meta["example_value"].dim()
    with graph.inserting_after(split_node):
        new_split_node = graph.call_function(
            torch.split,
            args=(split_input, split_sections),
            kwargs={"dim": split_dim},
        )
    split_node.replace_all_uses_with(new_split_node)
    # Preserve metadata (example values etc.) before erasing the original.
    new_split_node.meta.update(split_node.meta)
    graph.erase_node(split_node)
    counters["inductor"]["split_cat_norm"] += 1
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
@register_graph_pattern(
    CallFunctionVarArgs(torch.split, users=MULTIPLE),
    pass_dict=normalization_pass,
    extra_check=config_flag("split_cat_fx_passes"),
)
@register_graph_pattern(
    CallMethodVarArgs("split", users=MULTIPLE),
    pass_dict=normalization_pass,
    extra_check=config_flag("split_cat_fx_passes"),
)
def normalize_split_default(match: Match, *args, **kwargs):
    # Both torch.split calls and Tensor.split method calls are normalized via
    # the shared base, which handles the differing argument layouts.
    return normalize_split_base(match, _get_split_args_default)
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
@register_graph_pattern(
    CallFunctionVarArgs(torch.cat, users=MULTIPLE),
    pass_dict=normalization_pass,
    extra_check=config_flag("split_cat_fx_passes"),
)
def normalize_cat_default(match: Match, *args, **kwargs):
    """Normalize torch.cat calls to positional tensors + a non-negative dim kwarg."""
    cat_node = match.nodes[0]
    graph = match.graph
    tensors = get_arg_value(cat_node, 0, "tensors")
    cat_dim = get_arg_value(cat_node, 1, "dim")
    if cat_dim is None:
        # Accept the numpy-style "axis" spelling; otherwise default to 0.
        cat_axis = cat_node.kwargs.get("axis")
        if cat_axis is not None:
            cat_dim = cat_axis
        else:
            cat_dim = 0
    # NOTE(review): cat_dim can no longer be None here (defaulted above), so
    # only the tensors check below is live — confirm before simplifying.
    if tensors is None or cat_dim is None:
        log.info("couldn't find cat args")
        return
    assert isinstance(tensors, (list, tuple))
    # All participating nodes need example values to reason about ranks.
    for tensor in itertools.chain([cat_node], tensors):
        if "example_value" not in tensor.meta:
            log.warning("example value absent for node: %s", tensor)
            return

    ndim = cat_node.meta["example_value"].dim()

    def is_empty_tensor(x):
        # special case where torch.cat supports cat'ing with an empty tensor
        x_shape = x.meta["example_value"].shape
        return len(x_shape) == 1 and x_shape[0] == 0

    # Every input must match the output rank, except the empty-tensor case.
    assert all(
        ndim == x.meta["example_value"].dim() or is_empty_tensor(x) for x in tensors
    )

    if cat_dim < 0:  # Normalize cat dim
        cat_dim += ndim

    with graph.inserting_after(cat_node):
        new_cat_node = graph.call_function(
            torch.cat,
            args=(tensors,),
            kwargs={"dim": cat_dim},
        )
    cat_node.replace_all_uses_with(new_cat_node)
    # Preserve metadata before erasing the original node.
    new_cat_node.meta.update(cat_node.meta)
    graph.erase_node(cat_node)
    counters["inductor"]["split_cat_norm"] += 1
|
| 148 |
+
|
| 149 |
+
|
| 150 |
+
def find_next_users(split_node):
|
| 151 |
+
next_users = []
|
| 152 |
+
for getitem_node in split_node.users.keys():
|
| 153 |
+
for getitem_user in getitem_node.users.keys():
|
| 154 |
+
if getitem_user not in next_users:
|
| 155 |
+
next_users.append(getitem_user)
|
| 156 |
+
return next_users
|
| 157 |
+
|
| 158 |
+
|
| 159 |
+
@register_graph_pattern(
    CallMethodVarArgs("squeeze", users=MULTIPLE),
    pass_dict=normalization_pass,
    extra_check=config_flag("split_cat_fx_passes"),
)
def normalize_squeeze_default(match: Match, *args, **kwargs):
    """Normalize Tensor.squeeze method calls into torch.squeeze function calls."""
    squeeze_node = match.nodes[0]
    squeeze_input = get_arg_value(squeeze_node, 0)

    # Recover the dim argument across the various squeeze call shapes.
    if "dim" in squeeze_node.kwargs:
        assert len(squeeze_node.args) == 1
        dim = squeeze_node.kwargs["dim"]
    elif len(squeeze_node.args) == 1:
        # squeeze(Tensor)
        dim = None
    elif len(squeeze_node.args) == 2:
        # squeeze(Tensor self, int dim)
        # squeeze(Tensor self, int[] dim)
        dim = squeeze_node.args[1]
    else:
        # squeeze(Tensor self, int[] dim) (called with varargs)
        dim = squeeze_node.args[1:]

    # A single-element dim list collapses to a scalar dim.
    if isinstance(dim, Sequence) and len(dim) == 1:
        dim = dim[0]

    with match.graph.inserting_after(squeeze_node):
        if dim is None:
            new_squeeze_node = match.graph.call_function(
                torch.squeeze, args=(squeeze_input,)
            )
        else:
            new_squeeze_node = match.graph.call_function(
                torch.squeeze, args=(squeeze_input, dim)
            )
    squeeze_node.replace_all_uses_with(new_squeeze_node)
    match.graph.erase_node(squeeze_node)
|
| 196 |
+
|
| 197 |
+
|
| 198 |
+
class TorchSplit(CallFunction):
    """
    Matches a call to torch.split if it is in a normalized form. Ensures that all users of
    splits are unique getitems.
    """

    def __init__(self, arg, sizes):
        # using KeywordArg("dim") for `dim` checks they all match
        super().__init__(
            torch.split, arg, sizes, _users=MULTIPLE, dim=KeywordArg("dim")
        )

    def _match(self, node: torch.fx.Node, ctx: MatchContext):
        m = super()._match(node, ctx)
        if not m:
            return m
        # Sections must be a concrete list/tuple (the normalized split form).
        split_sections = node.args[1]
        if not isinstance(split_sections, (list, tuple)):
            return FailedMatch("split not normalized")
        # check users are all unique getitems
        seen_idxs = set()
        for user in node.users:
            if not CallFunction(operator.getitem, Arg(), Arg()).match(user):
                # This should ideally never happen. Split user should always be a getitem
                return FailedMatch(f"user of split not a getitem: {user}")
            if not isinstance(user.args[1], int):
                return FailedMatch("only integer getitems are handled")
            if user.args[1] in seen_idxs:
                return FailedMatch(f"duplicate getitem {user.args[1]}")
            if user.args[-1] < 0:
                # This shouldn't ideally happen as dynamo normalizes indexes to positive
                return FailedMatch("negative index")
            seen_idxs.add(user.args[1])
        return m
|
| 232 |
+
|
| 233 |
+
|
| 234 |
+
@register_graph_pattern(
    TorchSplit(
        CallFunction(
            operator.getitem,
            TorchSplit(
                KeywordArg("first_split_input"),
                KeywordArg("first_split_sections"),
            ),
            Ignored(),
        ),
        KeywordArg("next_split_sections"),
    ),
    pass_dict=merge_splits_pass,
    extra_check=config_flag("split_cat_fx_passes"),
)
def merge_splits(
    match: Match,
    first_split_input: torch.fx.Node,
    first_split_sections: List[int],
    next_split_sections: List[int],
    # Note: dim is implicitly passed by TorchSplit, as it internally uses a pattern with dim
    dim: int,
):
    """Fuse a split-of-a-split into one split with merged section sizes."""
    # `node` is the inner (second) split; its input is a getitem of the first.
    node = match.output_node()
    graph = match.graph
    first_split = node.args[0].args[0]
    next_split_index = node.args[0].args[1]

    # Splice the inner split's sections in place of the one outer section
    # that it re-splits.
    new_split_sections = list(first_split_sections)
    new_split_sections[next_split_index : next_split_index + 1] = next_split_sections

    first_split_dim = first_split.kwargs["dim"]

    to_remove = []

    with graph.inserting_before(first_split):
        # Add the new split node
        new_split = graph.call_function(
            torch.split,
            args=(first_split_input, new_split_sections),
            kwargs={"dim": first_split_dim},
        )
        # Map chunk index -> getitem node for the outer split's consumers.
        first_split_num_to_user = {
            user.args[1]: user for user in first_split.users.keys()
        }

        # Walk the merged section list, re-pointing the outer getitems and
        # materializing fresh getitems for the inner split's chunks.
        new_split_num = 0
        for split_num in range(len(first_split_sections)):
            if split_num not in first_split_num_to_user:
                # Chunk had no consumer; still occupies an output slot.
                new_split_num += 1
                continue
            old_getitem = first_split_num_to_user[split_num]
            if split_num != next_split_index:
                # Plain chunk: retarget its getitem at the merged split.
                old_getitem.update_arg(0, new_split)
                old_getitem.update_arg(1, new_split_num)
                new_split_num += 1
            else:
                # The re-split chunk: replace the inner split's getitems with
                # new getitems into the merged split.
                next_split_num_to_user = {
                    user.args[1]: user for user in node.users.keys()
                }
                for next_split_num in range(len(next_split_sections)):
                    with graph.inserting_after(new_split):
                        new_getitem = graph.call_function(
                            operator.getitem, args=(new_split, new_split_num)
                        )
                    new_split_num += 1
                    next_getitem = next_split_num_to_user[next_split_num]
                    new_getitem.meta.update(next_getitem.meta)
                    next_getitem.replace_all_uses_with(new_getitem)
                    to_remove.append(next_getitem)
                to_remove.append(node)
                to_remove.append(old_getitem)

    to_remove.append(first_split)
    # NOTE: this loop reuses the name `node`, shadowing the inner-split node
    # bound above (already appended to to_remove at that point).
    for node in to_remove:
        graph.erase_node(node)

    counters["inductor"]["consecutive_split_merged"] += 1
|
| 312 |
+
|
| 313 |
+
|
| 314 |
+
class SplitCatSimplifier:
    """
    Helper class to simplify split-cat pattern. In simple cases, both split and cat node can be removed in a "split->cat"
    pattern. However, there are various cases where they can't and we need to simplify split/ add transforms before cat.
    Some such cases are:
        1. Final node has additional args (not coming from the initial split)
        2. Shuffling of args between split/cat
        3. Some final nodes are non-(cat/stack)
        4. Split-dim != cat-dim (but equal split)

    Note that any combination of the above cases can happen.

    To deal with 1, 2, & 3 - we iterate over all users of split. And figure out common "ranges" that can be merged.
    Then, we simplify the split accordingly. In the best case, split can be entirely removed.

    To deal with 4, we add some transformations (unflatten + movedim) (See `get_transform_params`).

    Finally, depending on final node being cat or stack, unsqueeze/flatten needs to be added.
    """

    def simplify(
        self,
        graph: torch.fx.Graph,
        split_node: torch.fx.Node,
        split_sections: List[int],
    ):
        """Entry point: try to simplify/remove `split_node`; no-op when not possible."""
        # Find the next users (i.e. users after the getitem)
        next_users = find_next_users(split_node)
        # Gather inputs of the next users. When inputs come from `split_node`, they are instead represented by
        # a tuple indicating the split ranges. See `get_user_input_list` for more details
        user_inputs_list = self.get_user_input_list(split_node, next_users)
        # Simplify the split_sections based on user_inputs_list. In simpler cases, len(simplified_split_ranges) == 1 and
        # we can simply replace the split node. Otherwise, we simplify it.
        simplified_split_ranges = self.get_simplified_split_ranges(
            split_sections, next_users, user_inputs_list
        )
        if not simplified_split_ranges:  # Simplification not possible
            return
        transform_params_list = self.get_transform_params(
            split_node, next_users, user_inputs_list
        )
        # get_transform_params returns None (falsy) when e.g. an unequal split
        # feeds a dim-mismatched cat — bail out without touching the graph.
        if not transform_params_list:
            return

        # Start actual replacement
        user_inputs_list_new = self.replace_split(
            graph, split_node, split_sections, user_inputs_list, simplified_split_ranges
        )
        self.replace_cat(
            graph, split_node, next_users, user_inputs_list_new, transform_params_list
        )
        self.erase_old_nodes(graph, split_node, next_users)

    def get_user_input_list(
        self, split_node, next_users
    ) -> List[List[Union[torch.fx.Node, Tuple[int, int]]]]:
        """
        Returns list of inputs to the following user nodes, in order. The outer list represents the user node. The inner
        list represents the inputs to that particular node. This list can either contain
          - a tuple representing the ranges of get_items that should go into the cat (closed interval)
          - torch.fx.Node representing "other" inputs (which are not coming from our split)
        """
        user_inputs_list: List[List[Union[torch.fx.Node, Tuple[int, int]]]] = []
        for user in next_users:
            if user.target in {torch.cat, torch.stack}:
                user_inputs_list.append(self.get_merged_user_inputs(split_node, user))
            else:
                user_inputs_list.append(self.get_non_cat_node_input(split_node, user))
        return user_inputs_list

    def get_merged_user_inputs(
        self, split_node: torch.fx.Node, cat_node: torch.fx.Node
    ) -> List[Union[torch.fx.Node, Tuple[int, int]]]:
        """Inputs of one cat/stack user, with split getitems replaced by their indices and merged into ranges."""
        user_inputs = get_arg_value(cat_node, 0, "tensors")
        simplified_user_inputs = []
        split_users = set(split_node.users.keys())
        for user_input in user_inputs:
            if user_input not in split_users:
                simplified_user_inputs.append(user_input)
            else:
                # Add which "getitem" cat depends on (getitem's index arg)
                simplified_user_inputs.append(user_input.args[1])
        return self.merge_consecutive_inputs(simplified_user_inputs)

    def get_non_cat_node_input(
        self, split_node: torch.fx.Node, node: torch.fx.Node
    ) -> List[Tuple[int, int]]:
        """
        Get input for a non cat node in the same format as `get_merged_user_inputs`
        (only the split-derived inputs; each as a single-element closed range).
        """
        node_input = []
        split_users = set(split_node.users.keys())
        for node_arg in node.all_input_nodes:
            if node_arg in split_users:
                getitem_num = get_arg_value(node_arg, 1)
                node_input.append((getitem_num, getitem_num))
        return node_input

    def merge_consecutive_inputs(
        self, inputs: List[Union[torch.fx.Node, int]]
    ) -> List[Union[torch.fx.Node, Tuple[int, int]]]:
        """
        Merge consecutive inputs going into a user node.

        For e.g.
        [arg0, 0, 1, 2, arg1] -> [arg0, (0, 2), arg1]
        """
        merged_ranges = []
        cur_range = None  # open [start, end] run of consecutive getitem indices
        for input_ in inputs:
            if isinstance(input_, int):
                if not cur_range:
                    cur_range = [input_, input_]
                elif input_ == cur_range[1] + 1:
                    cur_range[1] += 1
                else:  # non-consecutive index: flush the run, start a new one
                    merged_ranges.append(tuple(cur_range))
                    cur_range = [input_, input_]
            else:
                # Non-split input: flush any pending run, keep the node as-is
                if cur_range:
                    merged_ranges.append(tuple(cur_range))
                    cur_range = None
                merged_ranges.append(input_)
        if cur_range:
            merged_ranges.append(tuple(cur_range))
        return merged_ranges

    def get_simplified_split_ranges(
        self,
        split_sections,
        next_users,
        user_inputs_list: List[List[Union[torch.fx.Node, Tuple[int, int]]]],
    ) -> List[Tuple[int, int]]:
        # NOTE(review): returns None (not a list) when simplification isn't
        # possible; callers rely on the falsy return.
        ranges = set()
        for user_node, user_inputs in zip(next_users, user_inputs_list):
            ranges |= {
                user_input
                for user_input in user_inputs
                if isinstance(user_input, tuple)
            }
        # Convert getitem-index ranges to element-offset ranges via prefix sums
        cumulative_sizes = [0] + torch.cumsum(torch.tensor(split_sections), 0).tolist()
        split_ranges = sorted(
            [(cumulative_sizes[r[0]], cumulative_sizes[r[1] + 1]) for r in ranges]
        )

        if not self.has_non_overlapping_ranges(
            split_ranges,
        ):  # This need not be a strict condition
            # However, we keep it now for simplicity.
            return
        split_ranges = self.fill_gaps(split_ranges, 0, cumulative_sizes[-1])
        if len(split_sections) == len(split_ranges):  # Simplification not possible
            return
        counters["inductor"]["scmerge_split_sections_removed"] = len(
            split_sections
        ) - len(split_ranges)
        return split_ranges

    def has_non_overlapping_ranges(self, ranges: List[Tuple[int, int]]):
        """True if the (sorted) half-open ranges never overlap each other."""
        for range_, next_range in zip(ranges, ranges[1:]):
            if range_[1] > next_range[0]:
                return False
        return True

    def fill_gaps(self, ranges, min_, max_):
        """Insert filler ranges so the returned ranges exactly cover [min_, max_).

        NOTE(review): assumes `ranges` is non-empty — `filled_ranges[-1]` would
        raise IndexError otherwise; callers only reach here with >= 1 range.
        """
        cur = min_
        filled_ranges = []
        for a, b in ranges:
            if cur < a:
                filled_ranges.append((cur, a))
            filled_ranges.append((a, b))
            cur = b
        if filled_ranges[-1][1] < max_:
            filled_ranges.append((filled_ranges[-1][1], max_))
        return filled_ranges

    def get_transform_params(
        self,
        split_node: torch.fx.Node,
        next_users: List[torch.fx.Node],
        user_inputs_list: List[List[Union[torch.fx.Node, Tuple[int, int]]]],
    ) -> List[List[Tuple]]:
        """
        Figure out what transforms are needed for each input to each cat node.

        We replace a split node with an unflatten followed by a movedim.
        Each per-input entry is a 4-tuple:
        (unflatten_params, movedim_params, unsqueeze_params, flatten_params).
        Returns None when the needed sub-split is not an equal split.
        """
        split_dim = split_node.kwargs["dim"]
        split_sections = split_node.args[1]
        transform_params_list = []
        for user_node, user_inputs in zip(next_users, user_inputs_list):
            if user_node.target not in {torch.cat, torch.stack}:
                # Non-cat/stack users need no transforms
                transform_params_list.append(None)
                continue

            cat_dim = get_arg_value(user_node, 1, "dim")
            transform_params = []
            for user_input in user_inputs:
                if split_dim == cat_dim and user_node.target == torch.cat:
                    # No transform needed
                    transform_params.append((None, None, None, None))
                elif isinstance(user_input, tuple):  # Split being simplified
                    # Verify equal split
                    subset_split_sections = split_sections[
                        user_input[0] : user_input[1] + 1
                    ]
                    # All sections should be equal
                    if len(set(subset_split_sections)) != 1:
                        return

                    num_splits = len(subset_split_sections)
                    unflatten_params = (split_dim, (num_splits, -1))
                    movedim_params = (
                        (split_dim, cat_dim) if split_dim != cat_dim else None
                    )
                    transform_params.append(
                        (unflatten_params, movedim_params, None, None)
                    )
                elif (
                    user_node.target == torch.stack or split_dim != cat_dim
                ):  # We need to unsqueeze inputs not coming through split
                    transform_params.append((None, None, (cat_dim,), None))
                else:  # Non-split inputs
                    transform_params.append((None, None, None, None))
            transform_params_list.append(transform_params)
        return transform_params_list

    def replace_split(
        self,
        graph: torch.fx.Graph,
        split_node: torch.fx.Node,
        split_sections: List[int],
        user_inputs_list: List[List[Union[torch.fx.Node, Tuple[int, int]]]],
        split_ranges: List[Tuple[int, int]],
    ) -> List[List[torch.fx.Node]]:
        """
        Replace the split node. It can either remove the split node if len(split_ranges) == 1, or simplify it
        into a split with lesser sections if len(split_ranges) > 1.

        Returns the new `user_inputs_list`, with tuples replaced with new getitems from the newer split node.
        """
        split_input = split_node.args[0]
        split_dim = split_node.kwargs["dim"]
        if len(split_ranges) == 1:  # We can completely eliminate the split node
            split_items = [split_input]
        else:
            with graph.inserting_after(split_node):
                new_split = graph.call_function(
                    torch.split,
                    args=(
                        split_input,
                        [r[1] - r[0] for r in split_ranges],
                        split_dim,
                    ),
                )
                new_split.meta.update(split_node.meta)
                counters["inductor"]["scmerge_split_added"] += 1
            with graph.inserting_after(new_split):
                split_items = [
                    graph.call_function(operator.getitem, args=(new_split, i))
                    for i in range(len(split_ranges))
                ]
        # Now assign the right getitem to the right input
        cumulative_sizes = [0] + torch.cumsum(torch.tensor(split_sections), 0).tolist()
        new_user_inputs_list = []
        for user_inputs in user_inputs_list:
            new_user_inputs = []
            for user_input in user_inputs:
                if isinstance(user_input, tuple):
                    # Find the correct new getitem (present in split_items)
                    new_user_inputs.append(
                        split_items[
                            split_ranges.index(
                                (
                                    cumulative_sizes[user_input[0]],
                                    cumulative_sizes[user_input[1] + 1],
                                )
                            )
                        ]
                    )
                else:
                    new_user_inputs.append(user_input)
            new_user_inputs_list.append(new_user_inputs)
        return new_user_inputs_list

    def replace_cat(
        self,
        graph,
        split_node,
        next_users,
        user_inputs_list_new,
        transform_params_list,
    ):
        """Rebuild each user (cat/stack or other) on top of the new split items,
        applying the per-input transforms computed by `get_transform_params`."""
        split_dim = split_node.kwargs["dim"]

        split_users = split_node.users.keys()
        new_cats = []
        for user_node, user_inputs_new, transform_params in zip(
            next_users, user_inputs_list_new, transform_params_list
        ):
            if user_node.target not in {torch.cat, torch.stack}:
                # Change the args and kwargs of non-cat/stack nodes. Replace old getitems (belonging to
                # the original split node) with the newer getitems
                next_cat_input = 0
                for input_node in user_node.all_input_nodes:
                    if input_node in split_users:
                        user_node.replace_input_with(
                            input_node, user_inputs_new[next_cat_input]
                        )
                        next_cat_input += 1
                continue

            # Handle cat/stack user nodes
            cat_dim = get_arg_value(user_node, 1, "dim")
            user_inputs_new_transformed = []
            # For `unsqueeze` transform, we will combine consecutive inputs with the same unsqueeze params, and stack them
            to_stack = []
            stack_dim = None
            with graph.inserting_before(user_node):
                for user_input_new, transform_param in zip(
                    user_inputs_new, transform_params
                ):
                    # Apply transforms
                    (
                        unflatten_params,
                        movedim_params,
                        unsqueeze_params,
                        flatten_params,
                    ) = transform_param
                    if unsqueeze_params and (
                        stack_dim is None or stack_dim == unsqueeze_params[0]
                    ):
                        # Accumulate into the pending stack run
                        to_stack.append(user_input_new)
                        stack_dim = unsqueeze_params[0]
                        continue
                    elif to_stack:
                        # Flush the pending stack run before processing this input
                        stacked_input = graph.call_function(
                            torch.stack, args=(to_stack, stack_dim)
                        )
                        to_stack = []
                        stack_dim = None
                        user_inputs_new_transformed.append(stacked_input)
                        if unsqueeze_params:
                            # Start a new run with a different stack dim
                            to_stack.append(user_input_new)
                            stack_dim = unsqueeze_params[0]
                            continue

                    if unflatten_params:
                        user_input_new = graph.call_function(
                            torch.unflatten, args=(user_input_new, *unflatten_params)
                        )
                    if movedim_params:
                        user_input_new = graph.call_function(
                            torch.movedim, args=(user_input_new, *movedim_params)
                        )
                    if flatten_params:
                        user_input_new = graph.call_function(
                            torch.flatten, args=(user_input_new, *flatten_params)
                        )
                    user_inputs_new_transformed.append(user_input_new)
                if to_stack:
                    # Flush any trailing stack run
                    stacked_input = graph.call_function(
                        torch.stack, args=(to_stack, stack_dim)
                    )
                    user_inputs_new_transformed.append(stacked_input)

            with graph.inserting_after(user_node):
                if len(user_inputs_new_transformed) > 1:
                    new_cat_node = graph.call_function(
                        torch.cat, args=(user_inputs_new_transformed, cat_dim)
                    )
                    new_cat_node.meta.update(user_node.meta)
                    counters["inductor"]["scmerge_cat_added"] += 1
                else:
                    new_cat_node = user_inputs_new_transformed[-1]

            if (
                user_node.target == torch.cat
                and split_dim != cat_dim
                and split_node.target == torch.split
            ):
                # The unflatten transform introduced an extra dim; flatten it back
                with graph.inserting_after(new_cat_node):
                    new_cat_node = graph.call_function(
                        torch.flatten, args=(new_cat_node, cat_dim, cat_dim + 1)
                    )
            user_node.replace_all_uses_with(new_cat_node)
            new_cats.append(new_cat_node)

    def erase_old_nodes(self, graph, split_node, next_users):
        """Erase the original split, its getitems, and replaced cat/stack users
        (in reverse order so users are erased before their producers)."""
        to_remove = [split_node]
        counters["inductor"]["scmerge_split_removed"] += 1
        for getitem_node in split_node.users.keys():
            to_remove.append(getitem_node)
        for next_user in next_users:
            if next_user.target not in {torch.cat, torch.stack}:
                continue
            counters["inductor"]["scmerge_cat_removed"] += 1
            to_remove.append(next_user)
        for node in reversed(to_remove):
            graph.erase_node(node)
| 716 |
+
|
| 717 |
+
class UnbindCatRemover(SplitCatSimplifier):
    """
    Helper class to merge Unbind->Cat/Stack. Many of the cases are similar to SplitCatSimplifier.

    Unbind can't be simplified like splits. So, we can only remove the unbind node. Other than this,
    other cases like multiple users, additional args, dim mismatch are similar to `SplitCatSimplifier`,
    hence we extend that class.
    """

    def remove_unbind(
        self,
        graph: torch.fx.Graph,
        unbind_node: torch.fx.Node,
    ):
        """Model the unbind as an all-ones split and reuse the base simplifier."""
        # Number of unbound outputs = highest getitem index observed + 1
        num_unbind = (
            max(getitem_node.args[1] for getitem_node in unbind_node.users.keys()) + 1
        )
        split_sections = [1 for _ in range(num_unbind)]

        super().simplify(graph, unbind_node, split_sections)

    def get_simplified_split_ranges(
        self,
        split_sections,
        next_users,
        user_inputs_list: List[List[Union[torch.fx.Node, Tuple[int, int]]]],
    ) -> List[Tuple[int, int]]:
        # Unbind can only be removed entirely (not partially simplified), so
        # require exactly one merged range.
        simplified_split_ranges = super().get_simplified_split_ranges(
            split_sections, next_users, user_inputs_list
        )
        if not simplified_split_ranges or len(simplified_split_ranges) != 1:
            return None
        return simplified_split_ranges

    def get_transform_params(
        self,
        unbind_node: torch.fx.Node,
        next_users: List[torch.fx.Node],
        user_inputs_list: List[List[Union[torch.fx.Node, Tuple[int, int]]]],
    ) -> List[List[Tuple]]:
        """
        Figure out what transforms are needed for each input to each cat node.

        Here is the rough transforms we apply:

        x -> unbind -> stack => x -> movedim

        x -> unbind -> cat => x -> movedim -> flatten

        When cat/stack nodes have additional args:

             addn ---|              addn -> unsqueeze ---|
        x -> unbind -> stack  =>           x -> movedim  -> cat

             addn ---|                            addn ---|
        x -> unbind -> cat  =>   x -> movedim -> flatten  -> cat

        (Note application of these depends on the dims as well)
        """
        split_dim = unbind_node.kwargs["dim"]
        transform_params_list = []
        for user_node, user_inputs in zip(next_users, user_inputs_list):
            cat_dim = get_arg_value(user_node, 1, "dim")
            transform_params = []
            for user_input in user_inputs:
                if isinstance(user_input, tuple):
                    # User input is coming from unbind
                    movedim_params = (
                        (split_dim, cat_dim) if split_dim != cat_dim else None
                    )
                    flatten_params = None
                    if user_node.target == torch.cat:
                        flatten_params = (cat_dim, cat_dim + 1)
                    transform_params.append(
                        (None, movedim_params, None, flatten_params)
                    )
                elif (
                    user_node.target == torch.stack
                ):  # We need to unsqueeze inputs not coming through unbind into cat
                    transform_params.append((None, None, (cat_dim,), None))
                else:  # Non-unbind inputs
                    transform_params.append((None, None, None, None))
            transform_params_list.append(transform_params)
        return transform_params_list
|
| 804 |
+
|
| 805 |
+
class GetItem(CallFunction):
    """Pattern expression matching ``operator.getitem(arg, index)`` nodes."""

    def __init__(self, arg, index, _users=1):
        super().__init__(operator.getitem, arg, index, _users=_users)

    def find_anchor_nodes(self, ctx: MatchContext, searched):
        # We generally match GetItem with arg being an Arg(). So, we never return the
        # anchor nodes as the stored node in ctx.pattern_to_node is returned. Here we
        # override find_anchor_nodes to not use ctx.pattern_to_node.
        for child_pattern in self.flat_args_kwargs[0]:
            if not isinstance(child_pattern, PatternExpr):
                continue
            for anchor in child_pattern.find_anchor_nodes(ctx, searched):
                if not isinstance(anchor, torch.fx.Node):
                    continue
                for candidate in anchor.users:
                    if candidate in searched:
                        continue
                    if self._match_fns(candidate):
                        yield candidate
                    searched.add(candidate)
|
| 823 |
+
|
| 824 |
+
|
| 825 |
+
@register_graph_pattern(
    RepeatedExpr(
        CallFunction(
            torch.squeeze,
            GetItem(
                TorchSplit(
                    KeywordArg("split_input"),
                    KeywordArg("split_sizes"),
                ),
                Ignored(),
            ),
            KeywordArg("dim"),
            _users=MULTIPLE,
        ),
    ),
    pass_dict=split_cat_pass,
    extra_check=config_flag("split_cat_fx_passes"),
)
@register_graph_pattern(
    RepeatedExpr(
        CallFunction(
            torch.squeeze,
            GetItem(
                TorchSplit(
                    KeywordArg("split_input"),
                    KeywordArg("split_sizes"),
                ),
                Ignored(),
            ),
            dim=KeywordArg("dim"),
            _users=MULTIPLE,
        )
    ),
    pass_dict=split_cat_pass,
    extra_check=config_flag("split_cat_fx_passes"),
)
def merge_split_squeeze(
    match: Match, split_input: torch.fx.Node, split_sizes: List[int], dim: int
):
    """Rewrite split(x, [1]*n, dim) followed by per-item squeeze(dim) into a
    single unbind(x, dim=dim), whose getitems replace the squeezes."""
    graph = match.graph
    split = next(node for node in match.nodes if node.target == torch.split)
    # Only an all-ones split is equivalent to unbind
    if not all(s == 1 for s in split_sizes):
        return
    # squeeze over multiple dims is not equivalent to unbind's single dim
    if isinstance(dim, Sequence):
        return
    next_users = find_next_users(split)
    # Every split output must feed a squeeze for the rewrite to be valid
    if not all(node.target == torch.squeeze for node in next_users):
        return
    with graph.inserting_before(match.output_node()):
        unbind = graph.call_function(
            torch.unbind, args=(split_input,), kwargs={"dim": dim}
        )
        # Process getitems in index order for deterministic graph output
        for item_index, getitem_node in sorted(
            [
                (getitem_node.args[1], getitem_node)
                for getitem_node in split.users.keys()
            ]
        ):
            # Each getitem has exactly one user here: its squeeze
            squeeze = next(iter(getitem_node.users.keys()))
            new_get_item = graph.call_function(
                operator.getitem, args=(unbind, item_index)
            )
            squeeze.replace_all_uses_with(new_get_item)
            new_get_item.meta.update(squeeze.meta)
            graph.erase_node(squeeze)
            graph.erase_node(getitem_node)
    graph.erase_node(split)
    counters["inductor"]["split_squeeze_replaced"] += 1
|
| 893 |
+
|
| 894 |
+
|
| 895 |
+
# Pattern: a list of getitem(unbind(unbind_input, dim=dim), <index>) nodes.
# NOTE(review): `partial=True` presumably lets the matched list contain extra
# entries not produced by the unbind (handled later as "other" inputs by
# UnbindCatRemover) — confirm against ListOf's definition.
getitem_unbind = ListOf(
    GetItem(
        CallFunction(
            torch.unbind,
            KeywordArg("unbind_input"),
            dim=KeywordArg("dim"),
            _users=MULTIPLE,
        ),
        Ignored(),
        _users=MULTIPLE,
    ),
    partial=True,
)
|
| 908 |
+
|
| 909 |
+
|
| 910 |
+
@register_graph_pattern(
    CallFunction([torch.stack, torch.cat], getitem_unbind, Ignored(), _users=MULTIPLE),
    pass_dict=unbind_stack_pass,
    extra_check=config_flag("split_cat_fx_passes"),
)
@register_graph_pattern(
    CallFunction(
        [torch.stack, torch.cat], getitem_unbind, dim=Ignored(), _users=MULTIPLE
    ),
    pass_dict=unbind_stack_pass,
    extra_check=config_flag("split_cat_fx_passes"),
)
@register_graph_pattern(
    CallFunction(
        [torch.stack, torch.cat], tensors=getitem_unbind, dim=Ignored(), _users=MULTIPLE
    ),
    pass_dict=unbind_stack_pass,
    extra_check=config_flag("split_cat_fx_passes"),
)
def merge_unbind_stack(match: Match, unbind_input: torch.fx.Node, dim: int):
    """Remove an unbind whose outputs feed a stack/cat, via UnbindCatRemover."""
    unbind_node = next(n for n in match.nodes if n.target == torch.unbind)
    remover = UnbindCatRemover()
    remover.remove_unbind(match.graph, unbind_node)
|
| 932 |
+
|
| 933 |
+
|
| 934 |
+
# Pattern: a list of getitem(split(..., split_sections), <index>) nodes.
# NOTE(review): `partial=True` presumably lets the matched list contain extra
# entries not produced by the split — confirm against ListOf's definition.
getitem_split = ListOf(
    CallFunction(
        operator.getitem,
        TorchSplit(
            Ignored(),
            KeywordArg("split_sections"),
        ),
        Ignored(),
        _users=MULTIPLE,
    ),
    partial=True,
)
|
| 946 |
+
|
| 947 |
+
|
| 948 |
+
@register_graph_pattern(
    CallFunction(
        [torch.stack, torch.cat],
        tensors=getitem_split,
        dim=Ignored(),
        _users=MULTIPLE,
    ),
    pass_dict=split_cat_pass,
    extra_check=config_flag("split_cat_fx_passes"),
)
@register_graph_pattern(
    CallFunction(
        [torch.stack, torch.cat],
        getitem_split,
        dim=Ignored(),
        _users=MULTIPLE,
    ),
    pass_dict=split_cat_pass,
    extra_check=config_flag("split_cat_fx_passes"),
)
@register_graph_pattern(
    CallFunction(
        [torch.stack, torch.cat],
        getitem_split,
        Ignored(),
        _users=MULTIPLE,
    ),
    pass_dict=split_cat_pass,
    extra_check=config_flag("split_cat_fx_passes"),
)
def simplify_split_cat(match: Match, split_sections: List[int], dim: int):
    """Entry point for the split->cat/stack simplification pass."""
    if not isinstance(split_sections, (list, tuple)):  # Unnormalized split
        return
    split_node = next(node for node in match.nodes if node.target == torch.split)
    SplitCatSimplifier().simplify(match.graph, split_node, split_sections)
|
llava_next/lib/python3.10/site-packages/torch/_inductor/index_propagation.py
ADDED
|
@@ -0,0 +1,240 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""This file implements the IndexPropagation ops handler, which wraps an
|
| 2 |
+
underlying handler to add a limited form of constant propagation, as well as
|
| 3 |
+
propagation of sympy expressions downstream of ops.index_expr calls.
|
| 4 |
+
|
| 5 |
+
For example, say we have the IR:
|
| 6 |
+
|
| 7 |
+
tmp0 = ops.index_expr(x, torch.int32)
|
| 8 |
+
tmp1 = ops.constant(2, torch.int32)
|
| 9 |
+
tmp2 = ops.mul(tmp0, tmp1)
|
| 10 |
+
tmp3 = ops.indirect_indexing(tmp2, x_size)
|
| 11 |
+
tmp4 = ops.load("buf0", tmp3)
|
| 12 |
+
|
| 13 |
+
The underlying handler would just see:
|
| 14 |
+
|
| 15 |
+
ops.load("buf0", x * 2)
|
| 16 |
+
|
| 17 |
+
This is limited by the set of operators handled in the sympy expression
|
| 18 |
+
printers. So simple operations like minimum and maximum cannot be translated to
|
| 19 |
+
SymPy expressions yet, despite sympy.Min and sympy.Max existing.
|
| 20 |
+
|
| 21 |
+
"""
|
| 22 |
+
import itertools
|
| 23 |
+
from dataclasses import dataclass
|
| 24 |
+
from typing import Any, Callable, Dict, Optional, Sequence, Tuple, Union
|
| 25 |
+
|
| 26 |
+
import sympy
|
| 27 |
+
|
| 28 |
+
import torch
|
| 29 |
+
from torch._prims_common import is_boolean_dtype, is_integer_dtype
|
| 30 |
+
from torch.utils._sympy.functions import FloorDiv, ModularIndexing, Where
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
@dataclass
class TypedExpr:
    """A SymPy expression with associated type"""

    # The symbolic expression itself
    expr: sympy.Expr
    # The torch dtype the expression is treated as (used for type promotion)
    dtype: torch.dtype
| 39 |
+
|
| 40 |
+
|
| 41 |
+
class SymPyOps:
|
| 42 |
+
"""An ops handler where all IR values are SymPy expressions
|
| 43 |
+
|
| 44 |
+
When a value cannot be represented as a SymPy expression, the method is
|
| 45 |
+
either not defined, or returns NotImplemented
|
| 46 |
+
|
| 47 |
+
"""
|
| 48 |
+
|
| 49 |
+
@staticmethod
|
| 50 |
+
def identity(value: Any) -> Any:
|
| 51 |
+
return value
|
| 52 |
+
|
| 53 |
+
@staticmethod
|
| 54 |
+
def constant(value: Union[int, float, bool], dtype: torch.dtype) -> TypedExpr:
|
| 55 |
+
if is_boolean_dtype(dtype):
|
| 56 |
+
expr = sympy.Integer(bool(value))
|
| 57 |
+
elif is_integer_dtype(dtype):
|
| 58 |
+
expr = sympy.Integer(int(value))
|
| 59 |
+
else:
|
| 60 |
+
expr = sympy.Float(float(value))
|
| 61 |
+
return TypedExpr(expr, dtype)
|
| 62 |
+
|
| 63 |
+
@staticmethod
|
| 64 |
+
def index_expr(value: sympy.Expr, dtype: torch.dtype) -> Union[int, TypedExpr]:
|
| 65 |
+
if isinstance(value, int):
|
| 66 |
+
value = sympy.Integer(value)
|
| 67 |
+
return TypedExpr(value, dtype)
|
| 68 |
+
|
| 69 |
+
@staticmethod
|
| 70 |
+
def to_dtype(value: Any, dtype: torch.dtype) -> Union[int, TypedExpr]:
|
| 71 |
+
if isinstance(value.expr, (sympy.Integer, sympy.Float)):
|
| 72 |
+
return SymPyOps.constant(value.expr, dtype)
|
| 73 |
+
elif is_integer_dtype(dtype) and is_integer_dtype(value.dtype):
|
| 74 |
+
return SymPyOps.index_expr(value.expr, dtype)
|
| 75 |
+
else:
|
| 76 |
+
# TODO: Inductor doesn't handle floating point in sympy expressions well at the moment
|
| 77 |
+
return NotImplemented
|
| 78 |
+
|
| 79 |
+
@staticmethod
|
| 80 |
+
def square(x: TypedExpr) -> TypedExpr:
|
| 81 |
+
return TypedExpr(x.expr * x.expr, x.dtype)
|
| 82 |
+
|
| 83 |
+
@staticmethod
|
| 84 |
+
def add(x: TypedExpr, y: TypedExpr) -> TypedExpr:
|
| 85 |
+
result_type = torch.promote_types(x.dtype, y.dtype)
|
| 86 |
+
return TypedExpr(x.expr + y.expr, result_type)
|
| 87 |
+
|
| 88 |
+
@staticmethod
|
| 89 |
+
def sub(x: TypedExpr, y: TypedExpr) -> TypedExpr:
|
| 90 |
+
result_type = torch.promote_types(x.dtype, y.dtype)
|
| 91 |
+
return TypedExpr(x.expr - y.expr, result_type)
|
| 92 |
+
|
| 93 |
+
@staticmethod
|
| 94 |
+
def mul(x: TypedExpr, y: TypedExpr) -> TypedExpr:
|
| 95 |
+
result_type = torch.promote_types(x.dtype, y.dtype)
|
| 96 |
+
return TypedExpr(x.expr * y.expr, result_type)
|
| 97 |
+
|
| 98 |
+
@staticmethod
|
| 99 |
+
def neg(x: TypedExpr) -> TypedExpr:
|
| 100 |
+
return TypedExpr(-x.expr, x.dtype)
|
| 101 |
+
|
| 102 |
+
@staticmethod
|
| 103 |
+
def floordiv(x: TypedExpr, y: TypedExpr) -> TypedExpr:
|
| 104 |
+
result_type = torch.promote_types(x.dtype, y.dtype)
|
| 105 |
+
if not is_integer_dtype(result_type):
|
| 106 |
+
return NotImplemented
|
| 107 |
+
|
| 108 |
+
return TypedExpr(FloorDiv(x.expr, y.expr), result_type)
|
| 109 |
+
|
| 110 |
+
@staticmethod
def remainder(x: TypedExpr, y: TypedExpr) -> Optional[TypedExpr]:
    """Integer modulo; non-integer promoted dtypes are unsupported."""
    promoted = torch.promote_types(x.dtype, y.dtype)
    if not is_integer_dtype(promoted):
        return NotImplemented
    # ModularIndexing(x, 1, y) is inductor's canonical spelling of x % y.
    return TypedExpr(ModularIndexing(x.expr, sympy.Integer(1), y.expr), promoted)
|
| 119 |
+
@staticmethod
def minimum(x: TypedExpr, y: TypedExpr) -> TypedExpr:
    """Symbolic elementwise min with torch type promotion."""
    return TypedExpr(sympy.Min(x.expr, y.expr), torch.promote_types(x.dtype, y.dtype))
|
| 124 |
+
@staticmethod
def maximum(x: TypedExpr, y: TypedExpr) -> TypedExpr:
    """Symbolic elementwise max with torch type promotion."""
    return TypedExpr(sympy.Max(x.expr, y.expr), torch.promote_types(x.dtype, y.dtype))
|
| 129 |
+
|
| 130 |
+
@dataclass
class IndexPropVar:
    """A value flowing through IndexPropagation: either a raw IR value or,
    when ``is_symbolic`` is set, a TypedExpr carrying a sympy expression."""

    value: Any  # Either an IR value, or TypedExpr if is_symbolic is true
    is_symbolic: bool = False

    @staticmethod
    def new_symbolic(expr: TypedExpr) -> "IndexPropVar":
        # Convenience constructor for the symbolic case.
        return IndexPropVar(expr, is_symbolic=True)

    def __post_init__(self):
        # A symbolic var must wrap a TypedExpr; anything else is a bug.
        if self.is_symbolic:
            assert isinstance(
                self.value, TypedExpr
            ), "Symbolic IndexPropVar must contain a TypedExpr"
|
| 144 |
+
|
| 145 |
+
class IndexPropagation:
    """Ops wrapper that tries to propagate constant and index_expr values through the computation.

    This aims to maximize the compile time simplification possible, and convert
    indirect indexing from arange into normal static indexing.

    """

    def __init__(self, inner: Any):
        # The wrapped ops handler; every op we can't simplify falls back to it.
        self._inner = inner

    def materialize_expr(self, expr: sympy.Expr, dtype: torch.dtype) -> Any:
        # Construct a new constant/index_expr from the SymPy expression
        if isinstance(expr, sympy.Integer):
            return self._inner.constant(int(expr), dtype)
        elif not expr.free_symbols:
            # Symbol-free but non-Integer: a numeric constant (e.g. float).
            return self._inner.constant(float(expr), dtype)
        return self._inner.index_expr(expr, dtype)

    def unwrap(self, a: Union[Any, IndexPropVar]) -> Any:
        """Convert an IndexPropVar (or nested list/tuple of them) back to an
        inner-handler value, materializing symbolic expressions."""
        if isinstance(a, (list, tuple)):
            return tuple(self.unwrap(v) for v in a)

        if not isinstance(a, IndexPropVar):
            return a

        # Prefer the sympy representation if possible
        if a.is_symbolic:
            return self.materialize_expr(a.value.expr, a.value.dtype)

        return a.value

    def wrap(self, a: Any) -> Union[IndexPropVar, Sequence[IndexPropVar]]:
        # Inverse of unwrap: tag inner-handler values (or tuples of them).
        if isinstance(a, (list, tuple)):
            return tuple(self.wrap(v) for v in a)
        return IndexPropVar(a)

    def fallback(
        self, name: str, args: Tuple, kwargs: Dict[str, Any]
    ) -> Union[IndexPropVar, Tuple[IndexPropVar, ...]]:
        # Fallback to the wrapped handler
        new_args = [self.unwrap(a) for a in args]
        new_kwargs = {k: self.unwrap(v) for k, v in kwargs.items()}
        return self.wrap(getattr(self._inner, name)(*new_args, **new_kwargs))

    def propagate_sympy(
        self, name: str, args: Tuple, kwargs: Dict[str, Any]
    ) -> IndexPropVar:
        # Build a new SymPy expression from this ops call
        def unwrap(a: Union[Any, IndexPropVar]) -> Any:
            # Unlike self.unwrap, do NOT materialize: pass the TypedExpr through.
            if not isinstance(a, IndexPropVar):
                return a
            return a.value

        new_args = [unwrap(a) for a in args]
        new_kwargs = {k: unwrap(v) for k, v in kwargs.items()}
        new_expr = getattr(SymPyOps, name)(*new_args, **new_kwargs)
        is_valid_expr = new_expr is not NotImplemented and (
            # Inductor doesn't expect floating point in sympy expressions, but
            # allow floating point constants to be propagated
            isinstance(new_expr.expr, sympy.Number)
            or new_expr.expr.is_integer
        )
        if not is_valid_expr:
            return self.fallback(name, args, kwargs)
        return IndexPropVar.new_symbolic(new_expr)

    def __getattr__(self, name: str) -> Callable[..., Union[Any, IndexPropVar]]:
        # Intercepts every ops.<name>(...) call: propagate symbolically when
        # SymPyOps supports the op and all var arguments are symbolic,
        # otherwise delegate to the wrapped handler.
        def inner(*args: Any, **kwargs: Any) -> Union[Any, IndexPropVar]:
            if not hasattr(SymPyOps, name):
                return self.fallback(name, args, kwargs)

            var_arguments = [
                a
                for a in itertools.chain(args, kwargs.values())
                if isinstance(a, IndexPropVar)
            ]
            if not all(v.is_symbolic for v in var_arguments):
                return self.fallback(name, args, kwargs)

            return self.propagate_sympy(name, args, kwargs)

        return inner

    def indirect_indexing(
        self, index: Union[Any, IndexPropVar], size: Any, check: bool = True
    ) -> Any:
        # nb. We do index + Where(...) rather than Where(idx >= 0, idx, idx + sz) because we don't have CSE
        # for SymPy expressions, so we don't want to repeat idx too much

        # indirect_indexing returns a sympy value, so no need to wrap in IndexPropVar here
        if isinstance(index, IndexPropVar) and index.is_symbolic:
            # If we are turning a indirect indexing into direct, we need to wrap it.
            index = index.value.expr
            return index + Where(index >= 0, 0, size)
        return self.fallback("indirect_indexing", (index, size, check), {}).value
|
llava_next/lib/python3.10/site-packages/torch/_inductor/utils.py
ADDED
|
@@ -0,0 +1,1045 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import collections
|
| 2 |
+
import contextlib
|
| 3 |
+
import functools
|
| 4 |
+
import inspect
|
| 5 |
+
import itertools
|
| 6 |
+
import logging
|
| 7 |
+
import math
|
| 8 |
+
import operator
|
| 9 |
+
import os
|
| 10 |
+
import shutil
|
| 11 |
+
import sys
|
| 12 |
+
import tempfile
|
| 13 |
+
import textwrap
|
| 14 |
+
import time
|
| 15 |
+
import unittest
|
| 16 |
+
from io import StringIO
|
| 17 |
+
from typing import (
|
| 18 |
+
Any,
|
| 19 |
+
Callable,
|
| 20 |
+
Dict,
|
| 21 |
+
Iterable,
|
| 22 |
+
List,
|
| 23 |
+
NamedTuple,
|
| 24 |
+
Optional,
|
| 25 |
+
Set,
|
| 26 |
+
TypeVar,
|
| 27 |
+
Union,
|
| 28 |
+
ValuesView,
|
| 29 |
+
)
|
| 30 |
+
from unittest import mock
|
| 31 |
+
|
| 32 |
+
import sympy
|
| 33 |
+
|
| 34 |
+
import torch
|
| 35 |
+
from torch.fx.immutable_collections import immutable_dict, immutable_list
|
| 36 |
+
from torch.utils._sympy.functions import CleanDiv, FloorDiv, ModularIndexing
|
| 37 |
+
|
| 38 |
+
from . import config
|
| 39 |
+
from .cuda_properties import current_device, get_device_capability
|
| 40 |
+
|
| 41 |
+
log = logging.getLogger(__name__)
|
| 42 |
+
|
| 43 |
+
_T = TypeVar("_T")
|
| 44 |
+
VarRanges = Dict[sympy.Expr, sympy.Expr]
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
def do_bench(*args, **kwargs):
    """Benchmark via triton's do_bench, normalizing the quantiles kwarg.

    Triton renamed 'percentiles' to 'quantiles' (PR #1513) and changed its
    default to None; inductor callers expect the old tuple behavior, so the
    default is pinned here and only the median measurement is returned.
    """

    @functools.lru_cache(None)
    def load_triton():
        try:
            # NB: Lazily load triton, as importing triton is slow
            # see https://github.com/openai/triton/issues/1599
            from triton.testing import do_bench as triton_do_bench
        except ImportError:
            raise NotImplementedError("requires Triton")
        # Pick whichever keyword name this triton version understands.
        field = (
            "quantiles"
            if inspect.signature(triton_do_bench).parameters.get("quantiles")
            is not None
            else "percentiles"
        )
        return triton_do_bench, field

    bench_fn, field_name = load_triton()
    kwargs.setdefault(field_name, (0.5, 0.2, 0.8))
    return bench_fn(*args, **kwargs)[0]
|
| 77 |
+
|
| 78 |
+
@functools.lru_cache(None)
def has_triton() -> bool:
    """True when CUDA is available, triton imports, and the GPU is sm70+."""
    if not torch.cuda.is_available():
        return False
    try:
        import triton
    except ImportError:
        return False
    return triton is not None and get_device_capability() >= (7, 0)
|
| 89 |
+
|
| 90 |
+
@functools.lru_cache(None)
def has_torchvision_roi_align() -> bool:
    """True when torchvision's roi_align is importable and registered on torch.ops."""
    try:
        from torchvision.ops import roi_align  # noqa: F401
    except ImportError:
        return False
    torchvision_ns = getattr(torch.ops, "torchvision", None)
    return roi_align is not None and hasattr(torchvision_ns, "roi_align")
|
| 101 |
+
|
| 102 |
+
def conditional_product(*args):
    """Product of the truthy args (falsy values are skipped).

    Returns 1 when no truthy argument is given; the previous implementation
    raised TypeError from reduce() on an empty sequence in that case.
    """
    return functools.reduce(operator.mul, [x for x in args if x], 1)
|
| 105 |
+
|
| 106 |
+
def decode_device(device: Union[Optional[torch.device], str]) -> torch.device:
    """Normalize a device spec: None -> the default device, str -> torch.device,
    and a bare "cuda" gets the currently active cuda index attached."""
    if device is None:
        return torch.tensor(0.0).device  # default device
    normalized = torch.device(device) if isinstance(device, str) else device
    if normalized.type == "cuda" and normalized.index is None:
        return torch.device("cuda", index=current_device())
    return normalized
|
| 115 |
+
|
| 116 |
+
def sympy_product(it):
    """Product of an iterable of sympy expressions, starting from Integer(1)."""
    result = sympy.Integer(1)
    for factor in it:
        result = result * factor
    return result
|
| 119 |
+
|
| 120 |
+
def sympy_dot(seq1, seq2):
    """Expanded symbolic inner product of two equal-length sequences."""
    assert len(seq1) == len(seq2)
    terms = (a * b for a, b in zip(seq1, seq2))
    return sympy.expand(sum(terms))
|
| 124 |
+
|
| 125 |
+
def unique(it: Iterable[_T]) -> ValuesView[_T]:
    """Deduplicate by object identity, preserving first-seen order."""
    seen = {id(obj): obj for obj in it}
    return seen.values()
|
| 128 |
+
|
| 129 |
+
def ceildiv(numer: int, denom: int) -> int:
    """Ceiling integer division, correct for negative operands too."""
    # TODO: There is a bug in a call to this function, to repro:
    # python benchmarks/dynamo/huggingface.py --inductor -d cuda --accuracy
    # --amp --only YituTechConvBert --dynamic-shapes
    both_int = isinstance(numer, int) and isinstance(denom, int)
    assert both_int, f"{numer}: {type(numer)}, {denom}: {type(denom)}"
    # Negated floor division of negated denominator == ceiling division.
    return -(numer // -denom)
|
| 138 |
+
|
| 139 |
+
def next_power_of_2(n: int) -> int:
    """Return the smallest power of 2 greater than or equal to n"""
    assert n <= 2**32, "32-bit only"
    # Classic bit-smearing: fill every bit below the top set bit of n-1,
    # then add one to land on the next power of two.
    m = n - 1
    for shift in (1, 2, 4, 8, 16):
        m |= m >> shift
    return m + 1
|
| 151 |
+
|
| 152 |
+
def convert_shape_to_inductor(lst: List[Union[int, torch.SymInt]]) -> List[sympy.Expr]:
    """
    Gets the shape and stride of a tensor. For non-symbolic tensors, this is
    trivial. But for symbolic tensors, we need to map from SymIntNode into
    sympy.Expr.
    """
    result = []
    for dim in lst:
        if isinstance(dim, torch.SymInt):
            result.append(dim.node.expr)
        else:
            result.append(sympy.Integer(dim))
    return result
|
| 162 |
+
|
| 163 |
+
def convert_shape_to_symint(
    lst: List[Union[int, sympy.Expr]]
) -> List[Union[int, torch.SymInt]]:
    """
    Takes a list of shapes from Inductor and converts them into symints (or just
    ints if all shapes are static).
    """
    from .virtualized import V

    def to_symint(dim):
        # Plain ints pass through; sympy integer constants collapse to int;
        # anything else becomes a SymInt backed by the graph's shape env.
        if isinstance(dim, int):
            return dim
        if isinstance(dim, sympy.Integer):
            return int(dim)
        return V.graph.sizevars.shape_env.create_symintnode(dim, hint=None)

    return [to_symint(dim) for dim in lst]
|
| 181 |
+
|
| 182 |
+
def gen_gm_and_inputs(target, args, kwargs):
    """Build a single-op fx GraphModule calling ``target`` and return it
    together with the tensor args that become its placeholders."""
    graph = torch.fx.Graph()
    graph_args = []
    tensor_args = []
    for idx, arg in enumerate(args):
        if isinstance(arg, torch.Tensor):
            graph_args.append(graph.placeholder(f"arg{idx}"))
            tensor_args.append(arg)
        else:
            graph_args.append(arg)
    # Tensor kwargs are not supported by this helper.
    assert all(not isinstance(x, torch.Tensor) for x in kwargs.values())
    node = graph.call_function(target, tuple(graph_args), kwargs)
    # Ops returning a single Tensor get wrapped so the output is always a tuple.
    returns = target._schema.returns
    if len(returns) == 1 and str(returns[0].type) == "Tensor":
        node = (node,)
    graph.output(node)

    return torch.fx.GraphModule({}, graph), tensor_args
|
| 204 |
+
|
| 205 |
+
def synchronize():
    """Block until pending CUDA work finishes; no-op without CUDA."""
    if not torch.cuda.is_available():
        return
    torch.cuda.synchronize()
|
| 209 |
+
|
| 210 |
+
def timed(model: Callable[..., Any], example_inputs, times: int = 1) -> float:
    """Wall-clock seconds for ``times`` synchronized runs of ``model``."""
    synchronize()
    torch.manual_seed(1337)
    start = time.perf_counter()
    for _ in range(times):
        result = model(*example_inputs)
        synchronize()
    elapsed = time.perf_counter() - start
    # GC the result after timing
    assert result is not None
    return elapsed
|
| 222 |
+
|
| 223 |
+
def print_performance(fn, args=(), times=10, repeat=10, baseline=1.0):
    """Print the median of ``repeat`` timings of ``fn`` relative to ``baseline``."""
    samples = [timed(fn, args, times) for _ in range(repeat)]
    took = torch.median(torch.tensor(samples))
    print(f"{took/baseline:.6f}")
    return took
|
| 229 |
+
|
| 230 |
+
# fx's immutable containers are used as dict keys downstream; give them
# value-based hashes so equal metadata hashes equally.
immutable_dict.__hash__ = lambda self: hash(tuple(self.items()))
immutable_list.__hash__ = lambda self: hash(tuple(self))
| 232 |
+
|
| 233 |
+
|
| 234 |
+
def precompute_method(obj: Any, method: str):
    """Replace obj.method() with a new method that returns a precomputed constant."""
    cached = getattr(obj, method)()
    setattr(obj, method, lambda: cached)
|
| 239 |
+
|
| 240 |
+
def precompute_methods(obj: Any, methods: List[str]):
    """Replace each named method with one returning its precomputed constant."""
    for name in methods:
        precompute_method(obj, name)
|
| 245 |
+
|
| 246 |
+
def cmp(a, b) -> int:
    """Three-way compare: -1, 0, or 1 as a is <, ==, or > b."""
    return (a > b) - (a < b)
|
| 249 |
+
|
| 250 |
+
def pad_listlike(x, size):
    """Broadcast a length-1 list/tuple to ``size`` elements; pass through otherwise."""
    if len(x) != 1:
        return x
    return type(x)([x[0]]) * size
|
| 256 |
+
|
| 257 |
+
def cache_on_self(fn):
    """Memoize a zero-argument method, storing its result on the instance."""
    attr = f"__{fn.__name__}_cache"

    @functools.wraps(fn)
    def wrapper(self):
        if hasattr(self, attr):
            return getattr(self, attr)
        value = fn(self)
        setattr(self, attr, value)
        return value

    return wrapper
|
| 268 |
+
|
| 269 |
+
def aggregate_origins(node_schedule):
    """Union the fx-node origins across a schedule list (or a single ExternKernel)."""
    from . import ir

    if isinstance(node_schedule, list):
        origins = set()
        for scheduled in node_schedule:
            if hasattr(scheduled, "node") and scheduled.node:
                origins |= scheduled.node.origins
        return origins
    if isinstance(node_schedule, ir.ExternKernel):
        return node_schedule.origins
    return set()
|
| 287 |
+
|
| 288 |
+
def get_fused_kernel_name(node_schedule, descriptive_names):
    """Build a "fused_..." kernel name from the schedule's origin fx nodes.

    ``descriptive_names`` selects the naming source: "original_aten"
    (pre-decomposition aten op), "torch" (post-dynamo source fn), or
    "inductor_node" (raw fx node names). Raises NotImplementedError for
    any other value. (Also drops the dead no-op ``sources = sources``
    line present in the previous version.)
    """
    all_origins = aggregate_origins(node_schedule)
    if descriptive_names == "original_aten":
        # Bases the kernel name off of the top-level aten operator (i.e. pre-decompositions)
        sources = [
            origin.meta["original_aten"]._overloadpacket.__name__
            for origin in all_origins
            if origin.op == "call_function" and "original_aten" in origin.meta
        ]
        sources = sorted(set(sources))
    elif descriptive_names == "torch":
        # Bases the kernel name off of the top-level "torch" operator (i.e. post-dynamo graph)
        sources = []
        for origin in all_origins:
            if origin.op == "call_function" and "source_fn" in origin.meta:
                if isinstance(origin.meta["source_fn"][1], str):
                    sources.append(origin.meta["source_fn"][1])
                else:
                    sources.append(origin.meta["source_fn"][1].__name__)
        sources = sorted(set(sources))
    elif descriptive_names == "inductor_node":
        sources = [
            origin.name for origin in all_origins if origin.op == "call_function"
        ]
    else:
        raise NotImplementedError
    return "_".join(["fused"] + sources)
|
| 317 |
+
|
| 318 |
+
def get_kernel_metadata(node_schedule, wrapper):
    """Produce comment lines mapping a fused kernel back to its source fx nodes."""
    all_origins = aggregate_origins(node_schedule)
    inductor_nodes = [origin for origin in all_origins if origin.op == "call_function"]

    from_node_dict = collections.defaultdict(list)
    original_aten_dict = collections.defaultdict(list)
    for node in inductor_nodes:
        meta = node.meta
        if "original_aten" in meta:
            aten_key = str(meta["original_aten"]._overloadpacket)
            original_aten_dict[aten_key].append(node.name)
        if "from_node" in meta:
            from_node_dict[meta["from_node"][0][0]].append(node.name)
    metadata = (
        f"{wrapper.comment} Source Nodes: [{', '.join(sorted(from_node_dict.keys()))}], "
        f"Original ATen: [{', '.join(sorted(original_aten_dict.keys()))}]"
    )
    # trace back to original node here
    detailed_metadata = [
        f"{wrapper.comment} {original_node} => {', '.join(sorted(nodes))}"
        for original_node, nodes in sorted(from_node_dict.items())
    ]
    return metadata, "\n".join(detailed_metadata)
|
| 343 |
+
|
| 344 |
+
def dominated_nodes(
    initial_queue: Iterable[torch.fx.Node], skip_filter=None
) -> Set[torch.fx.Node]:
    """Returns the set of nodes whose values depend on those within initial_queue"""
    worklist = list(initial_queue)
    dominated = set(worklist)

    # Breadth-insensitive graph walk over .users edges.
    while worklist:
        current = worklist.pop()
        for user in current.users:
            if skip_filter and skip_filter(user):
                continue
            if user in dominated:
                continue
            dominated.add(user)
            worklist.append(user)

    return dominated
|
| 362 |
+
|
| 363 |
+
def gather_origins(args, kwargs):
    """Collect fx origins from any unrealized (lazy Pointwise) IR args/kwargs.

    Drops the redundant function-local ``import itertools`` — the module
    already imports itertools at the top of the file.
    """
    from . import ir

    def is_unrealized_node(n):
        # Peel TensorBox/StorageBox wrappers down to the underlying IR node.
        if isinstance(n, (ir.TensorBox, ir.StorageBox)):
            return is_unrealized_node(n.data)
        return isinstance(n, ir.IRNode) and isinstance(n, ir.Pointwise)

    kwarg_origins = [val.origins for val in kwargs.values() if is_unrealized_node(val)]
    arg_origins = [arg.origins for arg in args if is_unrealized_node(arg)]
    return set(itertools.chain(*arg_origins, *kwarg_origins))
|
| 379 |
+
|
| 380 |
+
def sympy_str(expr: sympy.Expr) -> str:
    """
    Normal sympy str is very slow, this is a lot faster. The result are
    somewhat worse, as it doesn't do as much simplification. So don't
    use this for final codegen.
    """
    if isinstance(expr, sympy.Symbol):
        return expr.name
    if isinstance(expr, (sympy.Add, sympy.Mul)):
        joiner = " + " if isinstance(expr, sympy.Add) else " * "
        return joiner.join(sympy_str(arg) for arg in expr.args)

    if isinstance(expr, (ModularIndexing, CleanDiv, FloorDiv)):
        inner = ", ".join(sympy_str(arg) for arg in expr.args)
        return f"{expr.func.__name__}({inner})"
    return str(expr)
|
| 397 |
+
|
| 398 |
+
def sympy_symbol(name: str) -> sympy.Symbol:
    """Create an integer, non-negative index symbol for inductor expressions."""
    # This should never be used for creating shape/stride symbols, as those
    # should all be allocated before Inductor.
    assert name[0] != "s"
    # NOTE: shape symbols are positive (> 0), but index variables are only
    # non-negative (>= 0).
    return sympy.Symbol(name, integer=True, nonnegative=True)
|
| 406 |
+
|
| 407 |
+
def sympy_subs(expr: sympy.Expr, replacements: Dict[Any, Any]) -> sympy.Expr:
    """
    xreplace is faster than subs, but is way more picky
    """

    def as_symbol(key):
        # Bare strings are promoted to index symbols before substitution.
        return sympy_symbol(key) if isinstance(key, str) else key

    mapping = {as_symbol(k): as_symbol(v) for k, v in replacements.items()}
    return expr.xreplace(mapping)
|
| 421 |
+
|
| 422 |
+
def free_symbol_startswith(index: sympy.Expr, prefix: str):
    """True if any free symbol in ``index`` has a name starting with ``prefix``."""
    for sym in index.free_symbols:
        if sym.name.startswith(prefix):
            return True
    return False
|
| 425 |
+
|
| 426 |
+
def free_symbol_has(index: sympy.Expr, pattern: str):
    """True if any free symbol in ``index`` has ``pattern`` in its name."""
    for sym in index.free_symbols:
        if pattern in sym.name:
            return True
    return False
|
| 429 |
+
|
| 430 |
+
def has_incompatible_cudagraph_ops(gm):
    """True if ``gm`` contains an op known to be unsafe under CUDA graphs."""
    forbidden_set = {
        "aten._fused_moving_avg_obs_fq_helper.default",
        "aten._fused_moving_avg_obs_fq_helper_functional.default",
        "aten.multinomial.default",
        "fbgemm.dense_to_jagged.default",
        "fbgemm.jagged_to_padded_dense.default",
        "run_and_save_rng_state",
        "run_with_rng_state",
    }
    if torch.are_deterministic_algorithms_enabled():
        # Deterministic scatter/index kernels are also incompatible.
        forbidden_set |= {
            "aten._unsafe_index_put.default",
            "aten.index_put.default",
            "aten.index_put_.default",
            "aten.scatter.src",
            "aten.scatter.reduce",
            "aten.scatter.value_reduce",
            "aten.scatter_add_",
            "aten.scatter_add.default",
            "aten.scatter_reduce.two",
            "aten.scatter_reduce_.two",
            "aten.scatter_reduce.two_out",
        }
    return any(str(node.target) in forbidden_set for node in gm.graph.nodes)
|
| 461 |
+
|
| 462 |
+
# Argument-properties descriptor attached to compiled triton kernels: which
# argument indices are divisible by 16/8, equal to 1, or folded away.
instance_descriptor = collections.namedtuple(
    "instance_descriptor",
    ["divisible_by_16", "equal_to_1", "ids_of_folded_args", "divisible_by_8"],
    defaults=[tuple(), tuple(), tuple(), tuple()],
)
+
|
| 468 |
+
|
| 469 |
+
@contextlib.contextmanager
def fresh_inductor_cache(cache_entries=None):
    """
    Contextmanager that provides a clean tmp cachedir for inductor.

    Optionally, pass a dict as 'cache_entries' to get a list of filenames and sizes
    generated with this cache instance.
    """
    with tempfile.TemporaryDirectory() as inductor_cache_dir:
        triton_cache_dir = os.path.join(inductor_cache_dir, "triton")
        env_overrides = {
            "TORCHINDUCTOR_CACHE_DIR": inductor_cache_dir,
            "TRITON_CACHE_DIR": triton_cache_dir,
        }
        with mock.patch.dict(os.environ, env_overrides):
            yield
            if isinstance(cache_entries, dict):
                assert len(cache_entries) == 0, "expected empty cache_entries dict"
                if os.path.exists(triton_cache_dir):
                    cache_entries.update(
                        {
                            name: os.path.getsize(os.path.join(triton_cache_dir, name))
                            for name in os.listdir(triton_cache_dir)
                            if ".lock" not in name
                        }
                    )
| 496 |
+
|
| 497 |
+
def argsort(seq) -> List[int]:
    """Ascending argsort; ties keep the stable order of the descending pass."""
    # preserve original order for equal strides
    indices = range(len(seq))
    descending = sorted(indices, key=seq.__getitem__, reverse=True)
    descending.reverse()
    return descending
+
|
| 503 |
+
|
| 504 |
+
@functools.lru_cache(8)
def get_dtype_size(dtype):
    """Size in bytes of one element of ``dtype``."""
    probe = torch.empty((), dtype=dtype)
    return probe.element_size()
+
|
| 508 |
+
|
| 509 |
+
class LineContext(NamedTuple):
    """Marker entry in an IndentedBuffer's line list: records source context
    without emitting any output text (skipped by getrawvalue, recorded in
    the line map by getvaluewithlinemap)."""

    # Opaque context payload carried through to the line map.
    context: Any
| 512 |
+
|
| 513 |
+
class IndentedBuffer:
    """Line-oriented code buffer with indentation tracking.

    Lines may be plain strings, DeferredLineBase instances (resolved at
    render time; a None result drops the line), or LineContext markers
    (never rendered; surfaced via getvaluewithlinemap).
    """

    # Number of spaces per indentation level.
    tabwidth = 4

    def __init__(self, initial_indent=0):
        self._lines = []
        self._indent = initial_indent

    def getvaluewithlinemap(self):
        """Render the buffer.

        Returns (text, linemap) where linemap is a list of
        (1-based output line number, context) pairs for every LineContext
        encountered, in order.
        """
        buf = StringIO()
        p = 1
        linemap = []
        for line in self._lines:
            if isinstance(line, DeferredLineBase):
                line = line()
                if line is None:
                    # Deferred line was "unwritten": emit nothing.
                    continue
            elif isinstance(line, LineContext):
                # Record the position this context maps to; emit nothing.
                linemap.append((p, line.context))
                continue
            assert isinstance(line, str)
            buf.write(line)
            buf.write("\n")
            # A stored "line" may itself contain newlines; account for them.
            p += 1 + line.count("\n")
        return buf.getvalue(), linemap

    def getvalue(self):
        """Render the buffer to a single string (line map discarded)."""
        v, _ = self.getvaluewithlinemap()
        return v

    def getrawvalue(self):
        """Render the buffer while honoring trailing-backslash continuations:
        a line ending in a backslash is joined with the next line."""
        buf = StringIO()
        for line in self._lines:
            if isinstance(line, DeferredLineBase):
                line = line()
                if line is None:
                    continue
            elif isinstance(line, LineContext):
                continue
            assert isinstance(line, str)
            # backslash implies line continuation
            if line.endswith("\\"):
                buf.write(line[:-1])
            else:
                buf.write(line)
                buf.write("\n")
        return buf.getvalue()

    def clear(self):
        """Discard all buffered lines (indent level is kept)."""
        self._lines.clear()

    def __bool__(self):
        # Truthy iff anything has been written.
        return bool(self._lines)

    def prefix(self):
        """Whitespace prefix for the current indentation level."""
        return " " * (self._indent * self.tabwidth)

    def writeline(self, line):
        """Append one entry, applying the current indent to text lines.

        Whitespace-only strings are stored as empty lines so rendered
        output has no trailing spaces.
        """
        if isinstance(line, LineContext):
            self._lines.append(line)
        elif isinstance(line, DeferredLineBase):
            self._lines.append(line.with_prefix(self.prefix()))
        elif line.strip():
            self._lines.append(f"{self.prefix()}{line}")
        else:
            self._lines.append("")

    def writelines(self, lines):
        """Append each entry of *lines* via writeline."""
        for line in lines:
            self.writeline(line)

    def indent(self, offset=1):
        """Context manager that raises the indent level by *offset*
        (restored on exit, even on exception)."""
        @contextlib.contextmanager
        def ctx():
            self._indent += offset
            try:
                yield
            finally:
                self._indent -= offset

        return ctx()

    def splice(self, other_code, strip=False):
        """Copy *other_code* into this buffer at the current indent.

        For an IndentedBuffer source, the minimum common leading whitespace
        of its non-context lines is stripped first; for a string source,
        textwrap.dedent is applied (plus lstrip when *strip* is set).
        """
        if isinstance(other_code, IndentedBuffer):
            dedent = float("inf")
            for line in other_code._lines:
                if not isinstance(line, LineContext) and line:
                    dedent = min(dedent, len(line) - len(line.lstrip()))
            if math.isinf(dedent):
                # Only empty/context lines: nothing to dedent.
                dedent = 0
            for line in other_code._lines:
                if isinstance(line, LineContext):
                    self._lines.append(line)
                else:
                    # NOTE: unqualified call keeps base-class semantics even
                    # if a subclass overrides writeline.
                    IndentedBuffer.writeline(self, line[int(dedent) :])
        else:
            other_code = textwrap.dedent(other_code)
            if strip:
                other_code = other_code.lstrip()
            if not other_code:
                return
            other_code = other_code.rstrip()
            for line in other_code.split("\n"):
                self.writeline(line)
|
| 616 |
+
|
| 617 |
+
|
| 618 |
+
class DeferredLineBase:
    """Base class for output lines whose emission is decided later.

    Subclasses implement __call__ (return the text, or None to drop the
    line) and _new_line (construct a sibling instance from new text).
    """

    def __init__(self, line):
        # Whitespace-only lines are normalized to the empty string.
        self.line = line if line.strip() else ""

    def __call__(self) -> Optional[str]:
        """Returns either self.line or None to indicate the line has been 'unwritten'"""
        raise NotImplementedError()

    def _new_line(self, line: str) -> "DeferredLineBase":
        """Returns a new deferred line with the same condition"""
        raise NotImplementedError()

    def with_prefix(self, prefix):
        """Sibling line with *prefix* prepended (used for indentation)."""
        return self._new_line(f"{prefix}{self.line}")

    def lstrip(self):
        """Sibling line with leading whitespace removed."""
        return self._new_line(self.line.lstrip())

    def __getitem__(self, index):
        """Sibling line whose text is sliced/indexed like a string."""
        return self._new_line(self.line[index])

    def __bool__(self):
        # Truthy iff there is any text to emit.
        return bool(self.line)

    def __len__(self):
        return len(self.line)
|
| 648 |
+
|
| 649 |
+
|
| 650 |
+
@functools.lru_cache(None)
def is_big_gpu(index):
    """True if CUDA device *index* has enough SMs for max_autotune_gemm.

    The cutoff is 80 multiprocessors (a V100 has exactly 80); smaller
    devices get a warning and are rejected. Cached per device index.
    """
    sm_count = torch.cuda.get_device_properties(index).multi_processor_count
    if sm_count >= 80:
        return True
    log.warning("not enough SMs to use max_autotune_gemm mode")
    return False
|
| 657 |
+
|
| 658 |
+
|
| 659 |
+
def use_triton_template(layout, *, enable_int32=False):
    """Decide whether Triton GEMM templates may be tried for *layout*.

    Requires an autotune mode to be on, "TRITON" among the configured
    backends, a CUDA layout with a supported dtype, and a big-enough GPU.
    """
    allowed_dtypes = [torch.float16, torch.bfloat16, torch.float32]
    if enable_int32:
        allowed_dtypes = allowed_dtypes + [torch.int32]
    return (
        (
            config.max_autotune
            or config.max_autotune_gemm
            or config.search_autotune_cache
        )
        and "TRITON" in config.max_autotune_gemm_backends.upper().split(",")
        and layout.device.type == "cuda"
        and layout.dtype in allowed_dtypes
        and is_big_gpu(layout.device.index or 0)
    )
|
| 674 |
+
|
| 675 |
+
|
| 676 |
+
def use_aten_gemm_kernels():
    """True if "ATEN" is among the configured max-autotune GEMM backends."""
    backends = config.max_autotune_gemm_backends.upper().split(",")
    return "ATEN" in backends
|
| 678 |
+
|
| 679 |
+
|
| 680 |
+
class DebugDirManager:
    """Context manager that points dynamo's debug_dir_root at a fresh
    per-instance temporary name for the duration of the with-block, then
    deletes that directory and restores the previous setting."""

    # Class-wide counter so each instance gets a unique suffix.
    counter = itertools.count(0)

    def __init__(self):
        self.id = next(DebugDirManager.counter)
        self.prev_debug_name = None

    def __enter__(self):
        # Remember the current root and swap in a unique temporary one.
        self.prev_debug_name = torch._dynamo.config.debug_dir_root
        self.new_name = f"{self.prev_debug_name}_tmp_{self.id}"
        torch._dynamo.config.debug_dir_root = self.new_name

    def __exit__(self, *args):
        # NOTE(review): rmtree will raise if the directory was never
        # created inside the with-block — presumably callers always
        # trigger its creation; confirm before reusing elsewhere.
        shutil.rmtree(self.new_name)
        torch._dynamo.config.debug_dir_root = self.prev_debug_name
|
| 695 |
+
|
| 696 |
+
|
| 697 |
+
def run_and_get_code(fn, *args, **kwargs):
    """Run *fn* under a fresh dynamo state, capturing the source of every
    inductor-compiled module produced along the way.

    Returns (fn_result, list_of_source_code_strings).
    """
    from .graph import GraphLowering

    original_compile = GraphLowering.compile_to_module
    captured_sources = []

    def spy_compile_to_module(self):
        # Delegate to the real implementation, then record the generated file.
        mod = original_compile(self)
        with open(mod.__file__) as f:
            captured_sources.append(f.read())
        return mod

    with mock.patch.object(
        GraphLowering, "compile_to_module", spy_compile_to_module
    ):
        torch._dynamo.reset()
        result = fn(*args, **kwargs)
    return result, captured_sources
|
| 715 |
+
|
| 716 |
+
|
| 717 |
+
def run_and_get_triton_code(fn, *args, **kwargs):
    """Run *fn* via run_and_get_code and return the first generated source."""
    _, source_codes = run_and_get_code(fn, *args, **kwargs)
    n = len(source_codes)
    # Two outputs can appear when backwards was eagerly compiled.
    assert 1 <= n <= 2, f"expected one or two code outputs got {n}"
    return source_codes[0]
|
| 724 |
+
|
| 725 |
+
|
| 726 |
+
@contextlib.contextmanager
def override_lowering(aten_op, override_fn):
    """Temporarily replace the inductor lowering of *aten_op*.

    While active, the registered lowering is ``override_fn`` partially
    applied with the original lowering as its first argument; the original
    is restored on exit, even on exception.
    """
    from torch._inductor import lowering

    saved = lowering.lowerings[aten_op]
    lowering.lowerings[aten_op] = functools.partial(override_fn, saved)
    try:
        yield
    finally:
        lowering.lowerings[aten_op] = saved
|
| 740 |
+
|
| 741 |
+
|
| 742 |
+
def add_scheduler_init_hook(pre_fn, post_fn=None):
    """Patch Scheduler.__init__ so *pre_fn* runs before and *post_fn*
    (when given) runs after the real constructor.

    Returns the mock patcher object; intended for unit tests.
    """
    from torch._inductor.scheduler import Scheduler

    original_init = Scheduler.__init__

    def hooked_init(scheduler, nodes):
        pre_fn(scheduler, nodes)
        result = original_init(scheduler, nodes)
        if post_fn:
            post_fn(scheduler, nodes)
        return result

    return unittest.mock.patch.object(Scheduler, "__init__", hooked_init)
|
| 759 |
+
|
| 760 |
+
|
| 761 |
+
def developer_warning(msg):
    """Log *msg* at warning level for PyTorch developers.

    These messages are actionable for developers but not end users, so
    stable releases can demote them to info via config.developer_warnings.
    """
    emit = log.warning if config.developer_warnings else log.info
    emit(msg)
|
| 771 |
+
|
| 772 |
+
|
| 773 |
+
def get_num_bytes(*args: torch.Tensor, num_in_out_args: int = 0) -> int:
    """Total number of bytes occupied by the tensor arguments.

    The first *num_in_out_args* arguments are in/out tensors, counted
    twice (once for reading, once for writing). Non-tensor arguments are
    ignored.
    """
    total = 0
    for position, arg in enumerate(args):
        if not isinstance(arg, torch.Tensor):
            continue
        nbytes = arg.numel() * arg.element_size()
        # In/out tensors are both read and written.
        total += nbytes * 2 if position < num_in_out_args else nbytes
    return total
|
| 787 |
+
|
| 788 |
+
|
| 789 |
+
def create_bandwidth_info_str(ms, num_gb, gb_per_s, prefix="", suffix=""):
    """Format a one-line kernel bandwidth report.

    When colorama is installed, kernels that look slow (non-trivial
    runtime with low achieved bandwidth) are rendered in red.
    """
    info_str = f"{prefix}{ms:.3f}ms \t{num_gb:.3f} GB \t {gb_per_s:7.2f}GB/s{suffix}"
    try:
        import colorama
    except ImportError:
        log.warning("Colorama is not installed. Install it if you want colored output")
        return info_str
    # Heuristic "slow" threshold used by the original implementation.
    if ms > 0.012 and gb_per_s < 650:
        info_str = colorama.Fore.RED + info_str + colorama.Fore.RESET
    return info_str
|
| 800 |
+
|
| 801 |
+
|
| 802 |
+
def get_benchmark_name():
    """
    An experimental API used only when config.benchmark_kernel is true.

    The benchmark name is only available at codegen time, so it cannot be
    queried from benchmark_all_kernels (which runs after codegen). The
    name is assumed to follow the --only flag, in either of two forms:
    "--only model_name" or "--only=model_name". Works for the
    torchbench/huggingface/timm driver scripts; may return None for
    ad-hoc scripts.
    """
    argv = sys.argv
    try:
        pos = argv.index("--only")
    except ValueError:
        pos = None
    if pos is not None and pos + 1 < len(argv):
        candidate = argv[pos + 1]
        if candidate and not candidate.startswith("-"):
            return candidate
    for token in argv:
        if token.startswith("--only="):
            return token[len("--only=") :]
|
| 831 |
+
|
| 832 |
+
|
| 833 |
+
def is_ones(items):
    """True if every element of *items* equals 1 (vacuously true when empty)."""
    return not any(x != 1 for x in items)
|
| 835 |
+
|
| 836 |
+
|
| 837 |
+
def is_zeros(items):
    """True if every element of *items* equals 0 (vacuously true when empty)."""
    return not any(x != 0 for x in items)
|
| 839 |
+
|
| 840 |
+
|
| 841 |
+
def is_cpu_device(inputs):
    """True if every tensor in *inputs* lives on the CPU (non-tensors ignored)."""
    cpu = torch.device("cpu")
    return all(
        t.device == cpu for t in inputs if isinstance(t, torch.Tensor)
    )
|
| 847 |
+
|
| 848 |
+
|
| 849 |
+
def get_sympy_Expr_dtype(val: sympy.Expr) -> torch.dtype:
    """Map a sympy expression to the torch dtype inductor uses for it:
    int64 for integer expressions, float64 otherwise."""
    assert isinstance(
        val, sympy.Expr
    ), "only support sympy.Expr as input to get_sympy_Expr_dtype"
    return torch.int64 if val.is_integer else torch.float64
|
| 857 |
+
|
| 858 |
+
|
| 859 |
+
@contextlib.contextmanager
def maybe_profile(should_profile, *args, **kwargs):
    """Yield a torch profiler when *should_profile* is true, else yield None."""
    if not should_profile:
        yield
        return
    with torch.profiler.profile(*args, **kwargs) as prof:
        yield prof
|
| 866 |
+
|
| 867 |
+
|
| 868 |
+
def triton_config_to_hashable(cfg):
    """Convert a triton config to a tuple that uniquely identifies it,
    suitable for use as a dictionary key: sorted kwargs followed by
    num_warps and num_stages."""
    entries = sorted(cfg.kwargs.items())
    entries += [("num_warps", cfg.num_warps), ("num_stages", cfg.num_stages)]
    return tuple(entries)
|
| 877 |
+
|
| 878 |
+
|
| 879 |
+
HAS_COLORAMA = True
|
| 880 |
+
try:
|
| 881 |
+
import colorama
|
| 882 |
+
except ImportError:
|
| 883 |
+
HAS_COLORAMA = False
|
| 884 |
+
|
| 885 |
+
|
| 886 |
+
def _color_text(msg, color):
    """Wrap *msg* in the requested colorama foreground color; returns the
    message unchanged when colorama is unavailable."""
    if not HAS_COLORAMA:
        return msg
    fore = getattr(colorama.Fore, color.upper())
    return fore + msg + colorama.Fore.RESET
|
| 891 |
+
|
| 892 |
+
|
| 893 |
+
def green_text(msg):
    """Return *msg* colored green (no-op when colorama is unavailable)."""
    return _color_text(msg, "green")
|
| 895 |
+
|
| 896 |
+
|
| 897 |
+
def yellow_text(msg):
    """Return *msg* colored yellow (no-op when colorama is unavailable)."""
    return _color_text(msg, "yellow")
|
| 899 |
+
|
| 900 |
+
|
| 901 |
+
def red_text(msg):
    """Return *msg* colored red (no-op when colorama is unavailable)."""
    return _color_text(msg, "red")
|
| 903 |
+
|
| 904 |
+
|
| 905 |
+
def blue_text(msg):
    """Return *msg* colored blue (no-op when colorama is unavailable)."""
    return _color_text(msg, "blue")
|
| 907 |
+
|
| 908 |
+
|
| 909 |
+
@functools.lru_cache(None)
def python_type_to_schema_type():
    """Lazily-built (and cached) mapping from python/inductor value types
    to their torchgen schema type names. Built on first call because the
    ir module import must be deferred."""
    from . import ir

    return {
        torch.dtype: "int",
        torch.device: "Device",
        bool: "bool",
        float: "float",
        ir.TensorBox: "Tensor",
    }
|
| 921 |
+
|
| 922 |
+
|
| 923 |
+
def may_get_optional_schema_type(schema_type, is_optional_arg):
    """Wrap *schema_type* in Optional[...] when the argument is optional."""
    if is_optional_arg:
        return f"Optional[{schema_type}]"
    return schema_type
|
| 925 |
+
|
| 926 |
+
|
| 927 |
+
def type_match(arg, arg_type, is_optional_arg):
    """Check whether runtime value *arg* is compatible with the torchgen
    schema type *arg_type* (compared via its string form)."""
    if isinstance(arg, immutable_list):
        is_int_list = all(
            isinstance(x, int) or (isinstance(x, sympy.Symbol) and x.is_integer)
            for x in arg
        )
        if not is_int_list:
            # TODO: add support here
            return False
        expected = may_get_optional_schema_type("List[int]", is_optional_arg)
        return expected == str(arg_type)

    schema_type = python_type_to_schema_type().get(arg.__class__)
    if schema_type is not None:
        expected = may_get_optional_schema_type(schema_type, is_optional_arg)
        return expected == str(arg_type)

    # TODO: add support here
    return False
|
| 950 |
+
|
| 951 |
+
|
| 952 |
+
# torch/csrc/utils/python_arg_parser.cpp:FunctionSignature::parse
def schema_match(schema, args, kwargs):
    """Return True if (args, kwargs) can bind against *schema*.

    Python port of the C++ FunctionSignature::parse referenced above:
    walks the schema's arguments in declaration order, consuming
    positional args first and then keyword args, type-checking each
    bound value via type_match.
    """
    # Count required arguments and the positional-capable prefix.
    min_args = 0
    max_pos_args = 0
    for argument in schema.arguments:
        if not argument.has_default_value():
            min_args += 1
        if not argument.kwarg_only:
            max_pos_args += 1

    nargs = len(args)
    remaining_kwargs = len(kwargs)
    arg_pos = 0

    def args_error_message(nargs, max_pos_args, min_args):
        # Mirrors CPython's "takes N positional arguments" wording.
        if min_args != max_pos_args:
            return f"takes from {min_args} to {max_pos_args} positional arguments but {nargs} were given"
        else:
            return f"takes {max_pos_args} positional arguments but {nargs} were given"

    def is_optional(arg):
        return "Optional" in str(arg.type)

    def allow_none(arg):
        # A missing value is acceptable for optional args or ones with defaults.
        return is_optional(arg) or arg.has_default_value()

    assert len(args) <= max_pos_args, args_error_message(
        len(args), max_pos_args, min_args
    )

    for argument in schema.arguments:
        obj = None
        is_kwd = False
        if arg_pos < nargs:
            # Still consuming positional args: a kwarg-only parameter here
            # means the call shape cannot match this schema.
            if argument.kwarg_only:
                return False
            obj = args[arg_pos]
        elif kwargs:
            if argument.name in kwargs:
                obj = kwargs[argument.name]
                is_kwd = True

        if obj is None and not allow_none(argument):
            return False

        if obj is not None:
            expected_type = argument.type
            if not type_match(obj, expected_type, is_optional(argument)):
                return False

        if not is_kwd:
            arg_pos += 1
        elif (obj is None and is_optional(argument)) or obj is not None:
            # This keyword was consumed by the schema.
            remaining_kwargs -= 1

    # Any leftover keyword args mean the call does not fit this schema.
    if remaining_kwargs > 0:
        return False

    return True
|
| 1011 |
+
|
| 1012 |
+
|
| 1013 |
+
def try_find_schema(schemas, args, kwargs):
    """Return the first schema in *schemas* that (args, kwargs) matches,
    or None when no schema fits."""
    return next(
        (schema for schema in schemas if schema_match(schema, args, kwargs)),
        None,
    )
|
| 1019 |
+
|
| 1020 |
+
|
| 1021 |
+
def get_device_tflops(dtype):
    """Peak TFLOPS of the current device for *dtype*, via triton.testing.

    Half-precision dtypes use the tensor-core peak; fp32 uses tensor cores
    only when TF32 matmuls are allowed, otherwise the SIMD peak.
    """
    from triton.testing import get_max_simd_tflops, get_max_tensorcore_tflops

    assert dtype in (torch.float16, torch.bfloat16, torch.float32)
    if dtype in (torch.float16, torch.bfloat16):
        return get_max_tensorcore_tflops(dtype)
    if torch.backends.cuda.matmul.allow_tf32:
        return get_max_tensorcore_tflops(torch.float32)
    return get_max_simd_tflops(torch.float32)
|
| 1032 |
+
|
| 1033 |
+
|
| 1034 |
+
def get_gpu_dram_gbps():
    """DRAM bandwidth (GB/s) of the current GPU, as reported by triton."""
    from triton.testing import get_dram_gbps

    return get_dram_gbps()
|
| 1038 |
+
|
| 1039 |
+
|
| 1040 |
+
def is_welford_reduction(reduction_type):
    """True for the welford_* family of reduction types."""
    prefix = "welford"
    return reduction_type[: len(prefix)] == prefix
|
| 1042 |
+
|
| 1043 |
+
|
| 1044 |
+
def reduction_num_outputs(reduction_type):
    """Number of outputs a reduction produces: welford reductions yield
    three values, every other reduction yields one."""
    if is_welford_reduction(reduction_type):
        return 3
    return 1
|
llava_next/lib/python3.10/site-packages/torch/_inductor/wrapper_benchmark.py
ADDED
|
@@ -0,0 +1,293 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import dataclasses
|
| 2 |
+
import tempfile
|
| 3 |
+
from collections import defaultdict
|
| 4 |
+
|
| 5 |
+
import torch
|
| 6 |
+
from torch.autograd import DeviceType
|
| 7 |
+
from .utils import create_bandwidth_info_str, do_bench, get_num_bytes
|
| 8 |
+
|
| 9 |
+
_kernel_category_choices = [
|
| 10 |
+
"pointwise",
|
| 11 |
+
"reduction",
|
| 12 |
+
"persistent_reduction",
|
| 13 |
+
]
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
def get_kernel_category_by_source_code(src_code):
    """
    Similar to get_kernel_category but use the source code. Call this API
    if we have not compile the src_code to module yet.
    """
    matches = [c for c in _kernel_category_choices if f"@{c}" in src_code]
    return matches[0] if len(matches) == 1 else "unknown"
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def get_kernel_category(kernel_mod):
    """
    Given the module defining a triton kernel, return the category of the
    kernel: one of pointwise, reduction, persistent_reduction (or
    "unknown" when the module does not import exactly one such decorator).

    Currently we simply decide the category depending on what decorator is
    imported by the kernel.
    """
    matches = [c for c in _kernel_category_choices if c in kernel_mod.__dict__]
    return matches[0] if len(matches) == 1 else "unknown"
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
def benchmark_all_kernels(benchmark_name, benchmark_all_configs):
    """
    An experimental API used only when config.benchmark_kernel is true.

    Run the kernel benchmarks for all the kernels cached in PyCodeCache.
    Used in the compiled modules.

    Put this method here rather than codegen it for convenience since its implementation
    does not change based on different graph modules being compiled.
    """
    from torch._inductor.codecache import PyCodeCache

    def get_triton_kernel(mod):
        # Find the single CachingAutotuner defined in the kernel module.
        from torch._inductor.triton_heuristics import CachingAutotuner

        cand_list = [
            v
            for k, v in mod.__dict__.items()
            if k.startswith("triton_") and isinstance(v, CachingAutotuner)
        ]
        assert len(cand_list) == 1
        return cand_list[0]

    nfound = 0
    for kernel_key, kernel_mod in PyCodeCache.cache.items():
        # Only benchmarkable kernel modules expose both get_args and call.
        if not hasattr(kernel_mod, "get_args") or not hasattr(kernel_mod, "call"):
            continue

        triton_kernel = get_triton_kernel(kernel_mod)
        kernel_category = get_kernel_category(kernel_mod)
        args = kernel_mod.get_args()
        # in_out_ptr arguments are read and written, so they count double
        # toward the bytes moved.
        num_in_out_ptrs = len(
            [
                arg_name
                for arg_name in triton_kernel.fn.arg_names
                if arg_name.startswith("in_out_ptr")
            ]
        )
        num_gb = get_num_bytes(*args, num_in_out_args=num_in_out_ptrs) / 1e9

        def get_info_str(ms, n_regs, n_spills, shared, prefix=""):
            # Compose the bandwidth line, appending register/spill/shared-mem
            # details when all three are known.
            if not any(x is None for x in [n_regs, n_spills, shared]):
                kernel_detail_str = (
                    f" {n_regs:3} regs {n_spills:3} spills {shared:8} shared mem"
                )
            else:
                kernel_detail_str = ""

            gb_per_s = num_gb / (ms / 1e3)
            return create_bandwidth_info_str(
                ms, num_gb, gb_per_s, prefix=prefix, suffix=kernel_detail_str
            )

        kernel_desc = (
            f"{benchmark_name:20} {kernel_category[:3].upper()} {kernel_key[:10]}"
        )
        if benchmark_all_configs:
            # Benchmark every autotuner config, one line per launcher.
            assert hasattr(kernel_mod, "benchmark_all_configs")
            bench_result = kernel_mod.benchmark_all_configs(args)
            print(kernel_desc)
            for launcher, ms in bench_result.items():
                print(
                    f" {get_info_str(ms, launcher.n_regs, launcher.n_spills, launcher.shared)} @ {launcher.config}"
                )
        else:
            # Benchmark only the config the autotuner already selected.
            ms = do_bench(lambda: kernel_mod.call(args), rep=40, fast_flush=True)
            assert (
                len(triton_kernel.launchers) == 1
            ), "Autotuner should have selected the best config"
            launcher = triton_kernel.launchers[0]
            print(
                get_info_str(
                    ms,
                    launcher.n_regs,
                    launcher.n_spills,
                    launcher.shared,
                    prefix=f"{kernel_desc} ",
                )
            )

        nfound += 1
    if nfound == 0:
        print(
            "No kernel with benchmark functionality found. Make sure you run inductor with config.benchmark_kernel being True"
        )
|
| 131 |
+
|
| 132 |
+
|
| 133 |
+
@dataclasses.dataclass
class ProfileEvent:
    """One aggregated profiler event, with times/counts averaged over all
    benchmark runs."""

    category: str
    key: str
    self_cuda_time_ms: float
    # the benchmark is run multiple times and we average the count across all the
    # runs. It should be an integer but define a float just in case.
    count: float
|
| 141 |
+
|
| 142 |
+
|
| 143 |
+
def parse_profile_event_list(benchmark_name, event_list, wall_time_ms, nruns):
    """Bucket GPU-side profiler events by kernel category and print
    per-category and overall GPU-busy summaries (plus a machine-readable
    "Output for tabulate:" line)."""

    def get_self_cuda_time(ev):
        """
        ev.self_cuda_time_total is in microsecond. Convert to millisecond.
        """
        return ev.self_cuda_time_total / 1000 / nruns

    # category name -> list[ProfileEvent]
    all_events = defaultdict(list)

    def add_event(ev, category):
        profile_ev = ProfileEvent(
            category=category,
            key=ev.key,
            self_cuda_time_ms=get_self_cuda_time(ev),
            count=ev.count / nruns, # average across all runs
        )
        all_events[category].append(profile_ev)

    for ev in event_list:
        assert not ev.is_legacy, "Don't support the legacy profiler"
        if ev.device_type == DeviceType.CPU:
            # ignore the event on CPU side
            continue

        # Inductor kernel names encode the category in their prefix.
        category = "unknown"
        if ev.key.startswith("triton_"):
            if ev.key.startswith("triton_poi"):
                category = "triton_pointwise"
            elif ev.key.startswith("triton_red"):
                category = "triton_reduction"
            elif ev.key.startswith("triton_per"):
                category = "triton_persistent_reduction"
            else:
                category = "triton_unknown"

        add_event(ev, category)

    def report_category(category, profile_events):
        # Print a sorted table for one category; returns its total CUDA ms.
        from tabulate import tabulate

        profile_events.sort(key=lambda ev: ev.self_cuda_time_ms, reverse=True)

        rows = []
        total_time = 0.0
        print(f"\n == {category} category kernels == ")
        for ev in profile_events:
            total_time += ev.self_cuda_time_ms
            percent = f"{ev.self_cuda_time_ms / wall_time_ms * 100:.2f}%"
            rows.append([ev.key[:120], ev.self_cuda_time_ms, ev.count, percent])
        rows.append(
            ["Total", total_time, "", f"{total_time / wall_time_ms * 100:.2f}%"]
        )
        print(
            tabulate(
                rows, headers=["Kernel", "Self CUDA TIME (ms)", "Count", "Percent"]
            )
        )
        return total_time

    def report():
        category_list = [
            "triton_pointwise",
            "triton_reduction",
            "triton_persistent_reduction",
            "triton_unknown",
            "unknown",
        ]
        # Every bucketed category must be one we know how to report.
        assert set(all_events.keys()).issubset(
            set(category_list)
        ), f"{list(all_events.keys())}"

        per_category_wall_time = {}
        total_cuda_ms = 0.0
        for category in category_list:
            if category in all_events:
                _time = report_category(category, all_events[category])
                per_category_wall_time[category] = _time
                total_cuda_ms += _time

        gpu_busy_percent = f"{total_cuda_ms / wall_time_ms * 100:.2f}%"
        print(f"\nPercent of time when GPU is busy: {gpu_busy_percent}")
        print(f"Total wall time {wall_time_ms:.3f} ms")

        # output such a line so we can gather such line from all compiled modules from all
        # benchmarks and tabulate it!
        # Columns: benchmark_name, pointwise_percent, reduction_percent, persistent_reduction_percent,
        # unknown_category_percent, GPU_busy_percent, wall_time_ms
        tabulate_line = f"Output for tabulate: {benchmark_name}"
        for category in category_list:
            percent = (
                f"{per_category_wall_time.get(category, 0.0) / wall_time_ms * 100:.2f}%"
            )
            tabulate_line += f", {percent}"
        tabulate_line += f", {gpu_busy_percent}, {wall_time_ms:.3f}ms"

        print(tabulate_line)

    report()
|
| 241 |
+
|
| 242 |
+
|
| 243 |
+
def compiled_module_main(benchmark_name, benchmark_compiled_module_fn):
    """
    This is the function called in __main__ block of a compiled module.

    Parses the module's CLI flags and either benchmarks the individual
    kernels (-k/-c) or times the whole compiled module, optionally
    profiling it (-p) and dumping a chrome trace.
    """
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--benchmark-kernels",
        "-k",
        action="store_true",
        help="Whether to benchmark each individual kernels",
    )
    parser.add_argument(
        "--benchmark-all-configs",
        "-c",
        action="store_true",
        help="Whether to benchmark each individual config for a kernel",
    )
    parser.add_argument(
        "--profile",
        "-p",
        action="store_true",
        help="Whether to profile the compiled module",
    )
    args = parser.parse_args()

    if args.benchmark_kernels:
        benchmark_all_kernels(benchmark_name, args.benchmark_all_configs)
    else:
        times = 10
        repeat = 10
        # benchmark_compiled_module_fn returns seconds per `times` calls;
        # convert to average milliseconds per call.
        wall_time_ms = (
            benchmark_compiled_module_fn(times=times, repeat=repeat) / times * 1000
        )

        if not args.profile:
            return

        # Re-run under the profiler and dump a chrome trace plus a summary.
        with torch.profiler.profile(record_shapes=True) as p:
            benchmark_compiled_module_fn(times=times, repeat=repeat)

        path = f"{tempfile.gettempdir()}/compiled_module_profile.json"
        p.export_chrome_trace(path)
        print(f"Profiling result for a compiled module of benchmark {benchmark_name}:")
        print(f"Chrome trace for the profile is written to {path}")
        event_list = p.key_averages(group_by_input_shape=True)
        print(event_list.table(sort_by="self_cuda_time_total", row_limit=10))
        parse_profile_event_list(
            benchmark_name, event_list, wall_time_ms, times * repeat
        )
|
vlmpy310/lib/python3.10/site-packages/skimage/filters/__init__.py
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Sharpening, edge finding, rank filters, thresholding, etc."""
|
| 2 |
+
|
| 3 |
+
import lazy_loader as _lazy
|
| 4 |
+
|
| 5 |
+
__getattr__, __dir__, __all__ = _lazy.attach_stub(__name__, __file__)
|
vlmpy310/lib/python3.10/site-packages/skimage/filters/__init__.pyi
ADDED
|
@@ -0,0 +1,109 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Explicitly setting `__all__` is necessary for type inference engines
# to know which symbols are exported. See
# https://peps.python.org/pep-0484/#stub-files

"""Public API of the filters subpackage: re-exports from the implementation submodules."""

__all__ = [
    "LPIFilter2D",
    "apply_hysteresis_threshold",
    "butterworth",
    "correlate_sparse",
    "difference_of_gaussians",
    "farid",
    "farid_h",
    "farid_v",
    "filter_inverse",
    "filter_forward",
    "frangi",
    "gabor",
    "gabor_kernel",
    "gaussian",
    "hessian",
    "laplace",
    "median",
    "meijering",
    "prewitt",
    "prewitt_h",
    "prewitt_v",
    "rank",
    "rank_order",
    "roberts",
    "roberts_neg_diag",
    "roberts_pos_diag",
    "sato",
    "scharr",
    "scharr_h",
    "scharr_v",
    "sobel",
    "sobel_h",
    "sobel_v",
    "threshold_isodata",
    "threshold_li",
    "threshold_local",
    "threshold_mean",
    "threshold_minimum",
    "threshold_multiotsu",
    "threshold_niblack",
    "threshold_otsu",
    "threshold_sauvola",
    "threshold_triangle",
    "threshold_yen",
    "try_all_threshold",
    "unsharp_mask",
    "wiener",
    "window",
]

# Re-exports backing the names declared in `__all__` above.
from . import rank
from ._fft_based import butterworth
from ._gabor import gabor, gabor_kernel
from ._gaussian import difference_of_gaussians, gaussian
from ._median import median
from ._rank_order import rank_order
from ._sparse import correlate_sparse
from ._unsharp_mask import unsharp_mask
from ._window import window
from .edges import (
    farid,
    farid_h,
    farid_v,
    laplace,
    prewitt,
    prewitt_h,
    prewitt_v,
    roberts,
    roberts_neg_diag,
    roberts_pos_diag,
    scharr,
    scharr_h,
    scharr_v,
    sobel,
    sobel_h,
    sobel_v,
)
from .lpi_filter import (
    LPIFilter2D,
    filter_inverse,
    filter_forward,
    wiener,
)
from .ridges import (
    frangi,
    hessian,
    meijering,
    sato,
)
from .thresholding import (
    apply_hysteresis_threshold,
    threshold_isodata,
    threshold_li,
    threshold_local,
    threshold_mean,
    threshold_minimum,
    threshold_multiotsu,
    threshold_niblack,
    threshold_otsu,
    threshold_sauvola,
    threshold_triangle,
    threshold_yen,
    try_all_threshold,
)
|
vlmpy310/lib/python3.10/site-packages/skimage/filters/_fft_based.py
ADDED
|
@@ -0,0 +1,189 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import functools
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
import scipy.fft as fft
|
| 5 |
+
|
| 6 |
+
from .._shared.utils import _supported_float_type
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def _get_nd_butterworth_filter(
    shape, factor, order, high_pass, real, dtype=np.float64, squared_butterworth=True
):
    """Create a N-dimensional Butterworth mask for an FFT

    Parameters
    ----------
    shape : tuple of int
        Shape of the n-dimensional FFT and mask.
    factor : float
        Fraction of mask dimensions where the cutoff should be.
    order : float
        Controls the slope in the cutoff region.
    high_pass : bool
        Whether the filter is high pass (low frequencies attenuated) or
        low pass (high frequencies are attenuated).
    real : bool
        Whether the FFT is of a real (True) or complex (False) image
    squared_butterworth : bool, optional
        When True, the square of the Butterworth filter is used.

    Returns
    -------
    wfilt : ndarray
        The FFT mask.

    """
    # Per-axis squared frequency coordinates, stored in unshifted FFT order.
    sq_freqs = []
    for dim in shape:
        # Symmetric sample positions keep the mask center aligned with the
        # center of the FFT.
        coords = np.arange(-(dim - 1) // 2, (dim - 1) // 2 + 1) / (dim * factor)
        sq_freqs.append(fft.ifftshift(coords * coords))
    if real:
        # An FFT of a real image (rfftn) only stores half of the last axis.
        half = shape[-1] // 2 + 1
        sq_freqs[-1] = sq_freqs[-1][:half]
    # Squared Euclidean distance from the zero-frequency bin, built from
    # sparse broadcastable grids to avoid materializing full coordinates.
    grids = np.meshgrid(*sq_freqs, indexing="ij", sparse=True)
    dist2 = functools.reduce(np.add, grids).astype(dtype)
    dist2 = np.power(dist2, order)
    wfilt = 1 / (1 + dist2)
    if high_pass:
        wfilt *= dist2
    if not squared_butterworth:
        np.sqrt(wfilt, out=wfilt)
    return wfilt
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
def butterworth(
    image,
    cutoff_frequency_ratio=0.005,
    high_pass=True,
    order=2.0,
    channel_axis=None,
    *,
    squared_butterworth=True,
    npad=0,
):
    """Apply a Butterworth filter to enhance high or low frequency features.

    This filter is defined in the Fourier domain.

    Parameters
    ----------
    image : (M[, N[, ..., P]][, C]) ndarray
        Input image.
    cutoff_frequency_ratio : float, optional
        Determines the position of the cut-off relative to the shape of the
        FFT. Receives a value between [0, 0.5].
    high_pass : bool, optional
        Whether to perform a high pass filter. If False, a low pass filter is
        performed.
    order : float, optional
        Order of the filter which affects the slope near the cut-off. Higher
        order means steeper slope in frequency space.
    channel_axis : int, optional
        If there is a channel dimension, provide the index here. If None
        (default) then all axes are assumed to be spatial dimensions.
    squared_butterworth : bool, optional
        When True, the square of a Butterworth filter is used. See notes below
        for more details.
    npad : int, optional
        Pad each edge of the image by `npad` pixels using `numpy.pad`'s
        ``mode='edge'`` extension.

    Returns
    -------
    result : ndarray
        The Butterworth-filtered image.

    Notes
    -----
    A band-pass filter can be achieved by combining a high-pass and low-pass
    filter. The user can increase `npad` if boundary artifacts are apparent.

    The "Butterworth filter" used in image processing textbooks (e.g. [1]_,
    [2]_) is often the square of the traditional Butterworth filters as
    described by [3]_, [4]_. The squared version will be used here if
    `squared_butterworth` is set to ``True``. The lowpass, squared Butterworth
    filter is given by the following expression for the lowpass case:

    .. math::
        H_{low}(f) = \\frac{1}{1 + \\left(\\frac{f}{c f_s}\\right)^{2n}}

    with the highpass case given by

    .. math::
        H_{hi}(f) = 1 - H_{low}(f)

    where :math:`f=\\sqrt{\\sum_{d=0}^{\\mathrm{ndim}} f_{d}^{2}}` is the
    absolute value of the spatial frequency, :math:`f_s` is the sampling
    frequency, :math:`c` the ``cutoff_frequency_ratio``, and :math:`n` is the
    filter `order` [1]_. When ``squared_butterworth=False``, the square root of
    the above expressions are used instead.

    Note that ``cutoff_frequency_ratio`` is defined in terms of the sampling
    frequency, :math:`f_s`. The FFT spectrum covers the Nyquist range
    (:math:`[-f_s/2, f_s/2]`) so ``cutoff_frequency_ratio`` should have a value
    between 0 and 0.5. The frequency response (gain) at the cutoff is 0.5 when
    ``squared_butterworth`` is true and :math:`1/\\sqrt{2}` when it is false.

    Examples
    --------
    Apply a high-pass and low-pass Butterworth filter to a grayscale and
    color image respectively:

    >>> from skimage.data import camera, astronaut
    >>> from skimage.filters import butterworth
    >>> high_pass = butterworth(camera(), 0.07, True, 8)
    >>> low_pass = butterworth(astronaut(), 0.01, False, 4, channel_axis=-1)

    References
    ----------
    .. [1] Russ, John C., et al. The Image Processing Handbook, 3rd. Ed.
           1999, CRC Press, LLC.
    .. [2] Birchfield, Stan. Image Processing and Analysis. 2018. Cengage
           Learning.
    .. [3] Butterworth, Stephen. "On the theory of filter amplifiers."
           Wireless Engineer 7.6 (1930): 536-541.
    .. [4] https://en.wikipedia.org/wiki/Butterworth_filter

    """
    if npad < 0:
        raise ValueError("npad must be >= 0")
    elif npad > 0:
        # Remember where the original data sits so the padding can be
        # stripped again after filtering.
        center_slice = tuple(slice(npad, extent + npad) for extent in image.shape)
        image = np.pad(image, npad, mode='edge')
    # The frequency-domain mask only spans the spatial axes.
    if channel_axis is None:
        fft_shape = image.shape
    else:
        fft_shape = np.delete(image.shape, channel_axis)
    is_real = np.isrealobj(image)
    float_dtype = _supported_float_type(image.dtype, allow_complex=True)
    if cutoff_frequency_ratio < 0 or cutoff_frequency_ratio > 0.5:
        raise ValueError("cutoff_frequency_ratio should be in the range [0, 0.5]")
    wfilt = _get_nd_butterworth_filter(
        fft_shape,
        cutoff_frequency_ratio,
        order,
        high_pass,
        is_real,
        float_dtype,
        squared_butterworth,
    )
    axes = np.arange(image.ndim)
    if channel_axis is not None:
        axes = np.delete(axes, channel_axis)
        # Insert a length-1 channel axis so the spatial mask broadcasts
        # across every channel of the image spectrum.
        wfilt = np.expand_dims(wfilt, axis=channel_axis % image.ndim)
    # Real input uses the half-spectrum transforms; complex input the full ones.
    if is_real:
        forward, inverse = fft.rfftn, fft.irfftn
    else:
        forward, inverse = fft.fftn, fft.ifftn
    filtered = inverse(wfilt * forward(image, axes=axes), s=fft_shape, axes=axes)
    if npad > 0:
        filtered = filtered[center_slice]
    return filtered
|
vlmpy310/lib/python3.10/site-packages/skimage/filters/_gabor.py
ADDED
|
@@ -0,0 +1,220 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import math
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
from scipy import ndimage as ndi
|
| 5 |
+
|
| 6 |
+
from .._shared.utils import _supported_float_type, check_nD
|
| 7 |
+
|
| 8 |
+
__all__ = ['gabor_kernel', 'gabor']
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def _sigma_prefactor(bandwidth):
    """Return the product sigma * frequency for the given Gabor bandwidth.

    See http://www.cs.rug.nl/~imaging/simplecell.html
    """
    ratio = (2.0**bandwidth + 1) / (2.0**bandwidth - 1)
    return 1.0 / np.pi * math.sqrt(math.log(2) / 2.0) * ratio
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
def gabor_kernel(
    frequency,
    theta=0,
    bandwidth=1,
    sigma_x=None,
    sigma_y=None,
    n_stds=3,
    offset=0,
    dtype=np.complex128,
):
    """Return complex 2D Gabor filter kernel.

    Gabor kernel is a Gaussian kernel modulated by a complex harmonic function.
    Harmonic function consists of an imaginary sine function and a real
    cosine function. Spatial frequency is inversely proportional to the
    wavelength of the harmonic and to the standard deviation of a Gaussian
    kernel. The bandwidth is also inversely proportional to the standard
    deviation.

    Parameters
    ----------
    frequency : float
        Spatial frequency of the harmonic function. Specified in pixels.
    theta : float, optional
        Orientation in radians. If 0, the harmonic is in the x-direction.
    bandwidth : float, optional
        The bandwidth captured by the filter. For fixed bandwidth, ``sigma_x``
        and ``sigma_y`` will decrease with increasing frequency. This value is
        ignored if ``sigma_x`` and ``sigma_y`` are set by the user.
    sigma_x, sigma_y : float, optional
        Standard deviation in x- and y-directions. These directions apply to
        the kernel *before* rotation. If `theta = pi/2`, then the kernel is
        rotated 90 degrees so that ``sigma_x`` controls the *vertical*
        direction.
    n_stds : scalar, optional
        The linear size of the kernel is n_stds (3 by default) standard
        deviations
    offset : float, optional
        Phase offset of harmonic function in radians.
    dtype : {np.complex64, np.complex128}
        Specifies if the filter is single or double precision complex.

    Returns
    -------
    g : complex array
        Complex filter kernel.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Gabor_filter
    .. [2] https://web.archive.org/web/20180127125930/http://mplab.ucsd.edu/tutorials/gabor.pdf

    Examples
    --------
    >>> from skimage.filters import gabor_kernel
    >>> from matplotlib import pyplot as plt  # doctest: +SKIP

    >>> gk = gabor_kernel(frequency=0.2)
    >>> fig, ax = plt.subplots()  # doctest: +SKIP
    >>> ax.imshow(gk.real)  # doctest: +SKIP
    >>> plt.show()  # doctest: +SKIP

    >>> # more ripples (equivalent to increasing the size of the
    >>> # Gaussian spread)
    >>> gk = gabor_kernel(frequency=0.2, bandwidth=0.1)
    >>> fig, ax = plt.subplots()  # doctest: +SKIP
    >>> ax.imshow(gk.real)  # doctest: +SKIP
    >>> plt.show()  # doctest: +SKIP
    """
    # Derive the Gaussian envelope widths from the requested bandwidth when
    # they are not given explicitly.
    if sigma_x is None:
        sigma_x = _sigma_prefactor(bandwidth) / frequency
    if sigma_y is None:
        sigma_y = _sigma_prefactor(bandwidth) / frequency

    if np.dtype(dtype).kind != 'c':
        raise ValueError("dtype must be complex")

    ct = math.cos(theta)
    st = math.sin(theta)
    # Kernel half-size: n_stds standard deviations of the rotated envelope
    # along each axis, never less than 1 pixel.
    x0 = math.ceil(max(abs(n_stds * sigma_x * ct), abs(n_stds * sigma_y * st), 1))
    y0 = math.ceil(max(abs(n_stds * sigma_y * ct), abs(n_stds * sigma_x * st), 1))
    y, x = np.meshgrid(
        np.arange(-y0, y0 + 1), np.arange(-x0, x0 + 1), indexing='ij', sparse=True
    )
    # Coordinates rotated by theta; the harmonic runs along `rotx`.
    rotx = x * ct + y * st
    roty = -x * st + y * ct

    # Gaussian envelope modulated by a complex plane wave; computing into a
    # preallocated array fixes the output precision requested via `dtype`.
    g = np.empty(roty.shape, dtype=dtype)
    np.exp(
        -0.5 * (rotx**2 / sigma_x**2 + roty**2 / sigma_y**2)
        + 1j * (2 * np.pi * frequency * rotx + offset),
        out=g,
    )
    # Normalize by the area of the 2D Gaussian envelope.
    g *= 1 / (2 * np.pi * sigma_x * sigma_y)

    return g
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
def gabor(
    image,
    frequency,
    theta=0,
    bandwidth=1,
    sigma_x=None,
    sigma_y=None,
    n_stds=3,
    offset=0,
    mode='reflect',
    cval=0,
):
    """Return real and imaginary responses to Gabor filter.

    The real and imaginary parts of the Gabor filter kernel are applied to the
    image and the response is returned as a pair of arrays.

    Gabor filter is a linear filter with a Gaussian kernel which is modulated
    by a sinusoidal plane wave. Frequency and orientation representations of
    the Gabor filter are similar to those of the human visual system.
    Gabor filter banks are commonly used in computer vision and image
    processing. They are especially suitable for edge detection and texture
    classification.

    Parameters
    ----------
    image : 2-D array
        Input image.
    frequency : float
        Spatial frequency of the harmonic function. Specified in pixels.
    theta : float, optional
        Orientation in radians. If 0, the harmonic is in the x-direction.
    bandwidth : float, optional
        The bandwidth captured by the filter. For fixed bandwidth, ``sigma_x``
        and ``sigma_y`` will decrease with increasing frequency. This value is
        ignored if ``sigma_x`` and ``sigma_y`` are set by the user.
    sigma_x, sigma_y : float, optional
        Standard deviation in x- and y-directions. These directions apply to
        the kernel *before* rotation. If `theta = pi/2`, then the kernel is
        rotated 90 degrees so that ``sigma_x`` controls the *vertical*
        direction.
    n_stds : scalar, optional
        The linear size of the kernel is n_stds (3 by default) standard
        deviations.
    offset : float, optional
        Phase offset of harmonic function in radians.
    mode : {'constant', 'nearest', 'reflect', 'mirror', 'wrap'}, optional
        Mode used to convolve image with a kernel, passed to `ndi.convolve`
    cval : scalar, optional
        Value to fill past edges of input if ``mode`` of convolution is
        'constant'. The parameter is passed to `ndi.convolve`.

    Returns
    -------
    real, imag : arrays
        Filtered images using the real and imaginary parts of the Gabor filter
        kernel. Images are of the same dimensions as the input one.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Gabor_filter
    .. [2] https://web.archive.org/web/20180127125930/http://mplab.ucsd.edu/tutorials/gabor.pdf

    Examples
    --------
    >>> from skimage.filters import gabor
    >>> from skimage import data
    >>> from matplotlib import pyplot as plt  # doctest: +SKIP

    >>> image = data.coins()
    >>> # detecting edges in a coin image
    >>> filt_real, filt_imag = gabor(image, frequency=0.6)
    >>> fig, ax = plt.subplots()  # doctest: +SKIP
    >>> ax.imshow(filt_real)  # doctest: +SKIP
    >>> plt.show()  # doctest: +SKIP

    >>> # less sensitivity to finer details with the lower frequency kernel
    >>> filt_real, filt_imag = gabor(image, frequency=0.1)
    >>> fig, ax = plt.subplots()  # doctest: +SKIP
    >>> ax.imshow(filt_real)  # doctest: +SKIP
    >>> plt.show()  # doctest: +SKIP
    """
    check_nD(image, 2)
    # do not cast integer types to float!
    if image.dtype.kind == 'f':
        float_dtype = _supported_float_type(image.dtype)
        image = image.astype(float_dtype, copy=False)
        # Keep kernel precision in sync with a (possibly float32) image.
        kernel_dtype = np.promote_types(image.dtype, np.complex64)
    else:
        kernel_dtype = np.complex128

    g = gabor_kernel(
        frequency,
        theta,
        bandwidth,
        sigma_x,
        sigma_y,
        n_stds,
        offset,
        dtype=kernel_dtype,
    )

    # Convolve with the real and imaginary parts of the kernel separately so
    # the two responses can be returned as real-valued arrays.
    filtered_real = ndi.convolve(image, np.real(g), mode=mode, cval=cval)
    filtered_imag = ndi.convolve(image, np.imag(g), mode=mode, cval=cval)

    return filtered_real, filtered_imag
|